-rw-r--r--  Documentation/aoe/udev.txt | 2
-rw-r--r--  Documentation/block/cmdline-partition.txt | 39
-rw-r--r--  Documentation/device-mapper/cache.txt | 6
-rw-r--r--  Documentation/device-mapper/statistics.txt | 186
-rw-r--r--  Documentation/device-mapper/thin-provisioning.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/dma/k3dma.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/dma/shdma.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-esdhc.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/can/sja1000.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power_supply/msm-poweroff.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-omap.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-palmas.txt | 33
-rw-r--r--  Documentation/dma-buf-sharing.txt | 12
-rw-r--r--  Documentation/driver-model/devres.txt | 3
-rw-r--r--  Documentation/filesystems/proc.txt | 19
-rw-r--r--  Documentation/filesystems/ramfs-rootfs-initramfs.txt | 4
-rw-r--r--  Documentation/kbuild/kconfig-language.txt | 1
-rw-r--r--  Documentation/kbuild/kconfig.txt | 8
-rw-r--r--  Documentation/networking/00-INDEX | 2
-rw-r--r--  Documentation/networking/i40e.txt | 115
-rw-r--r--  Documentation/sysctl/kernel.txt | 1
-rw-r--r--  Documentation/sysctl/vm.txt | 30
-rw-r--r--  Documentation/vm/hugetlbpage.txt | 25
-rw-r--r--  Documentation/vm/soft-dirty.txt | 7
-rw-r--r--  MAINTAINERS | 137
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c | 5
-rw-r--r--  arch/arc/kernel/devtree.c | 6
-rw-r--r--  arch/arc/mm/init.c | 5
-rw-r--r--  arch/arm/common/edma.c | 17
-rw-r--r--  arch/arm/mach-ep93xx/vision_ep9307.c | 57
-rw-r--r--  arch/arm/mach-imx/mm-imx25.c | 17
-rw-r--r--  arch/arm/mach-imx/mm-imx5.c | 14
-rw-r--r--  arch/arm/mm/hugetlbpage.c | 5
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm/xen/enlighten.c | 14
-rw-r--r--  arch/arm64/kernel/setup.c | 5
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 5
-rw-r--r--  arch/arm64/mm/init.c | 3
-rw-r--r--  arch/c6x/kernel/devicetree.c | 8
-rw-r--r--  arch/cris/Kconfig | 81
-rw-r--r--  arch/cris/arch-v10/drivers/Kconfig | 70
-rw-r--r--  arch/cris/arch-v10/drivers/Makefile | 2
-rw-r--r--  arch/cris/arch-v32/drivers/Kconfig | 394
-rw-r--r--  arch/cris/arch-v32/mach-a3/Kconfig | 4
-rw-r--r--  arch/cris/include/asm/processor.h | 1
-rw-r--r--  arch/cris/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 5
-rw-r--r--  arch/metag/mm/hugetlbpage.c | 5
-rw-r--r--  arch/metag/mm/init.c | 5
-rw-r--r--  arch/microblaze/kernel/prom.c | 8
-rw-r--r--  arch/mips/kernel/prom.c | 3
-rw-r--r--  arch/mips/mm/hugetlbpage.c | 5
-rw-r--r--  arch/mn10300/kernel/entry.S | 6
-rw-r--r--  arch/openrisc/kernel/prom.c | 8
-rw-r--r--  arch/powerpc/kernel/prom.c | 8
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 3
-rw-r--r--  arch/powerpc/kernel/smp.c | 1
-rw-r--r--  arch/powerpc/mm/fault.c | 13
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 31
-rw-r--r--  arch/s390/Kconfig | 9
-rw-r--r--  arch/s390/defconfig | 39
-rw-r--r--  arch/s390/include/asm/irq.h | 12
-rw-r--r--  arch/s390/include/asm/kprobes.h | 4
-rw-r--r--  arch/s390/include/asm/sclp.h | 1
-rw-r--r--  arch/s390/kernel/compat_linux.c | 9
-rw-r--r--  arch/s390/kernel/compat_signal.c | 10
-rw-r--r--  arch/s390/kernel/crash_dump.c | 219
-rw-r--r--  arch/s390/kernel/dumpstack.c | 20
-rw-r--r--  arch/s390/kernel/entry.h | 18
-rw-r--r--  arch/s390/kernel/ftrace.c | 5
-rw-r--r--  arch/s390/kernel/irq.c | 85
-rw-r--r--  arch/s390/kernel/kprobes.c | 144
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 4
-rw-r--r--  arch/s390/kernel/perf_event.c | 5
-rw-r--r--  arch/s390/kernel/runtime_instr.c | 4
-rw-r--r--  arch/s390/kernel/smp.c | 2
-rw-r--r--  arch/s390/kernel/suspend.c | 1
-rw-r--r--  arch/s390/mm/fault.c | 2
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 5
-rw-r--r--  arch/s390/mm/maccess.c | 1
-rw-r--r--  arch/s390/mm/pgtable.c | 6
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 6
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 89
-rw-r--r--  arch/sh/mm/hugetlbpage.c | 5
-rw-r--r--  arch/sparc/kernel/sys_sparc32.c | 12
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 5
-rw-r--r--  arch/tile/mm/hugetlbpage.c | 5
-rw-r--r--  arch/x86/include/asm/jump_label.h | 9
-rw-r--r--  arch/x86/include/asm/pgtable.h | 34
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 3
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 37
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/x86/kernel/devicetree.c | 3
-rw-r--r--  arch/x86/kernel/early-quirks.c | 154
-rw-r--r--  arch/x86/kernel/jump_label.c | 70
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 8
-rw-r--r--  arch/x86/mm/tlb.c | 14
-rw-r--r--  arch/x86/xen/enlighten.c | 1
-rw-r--r--  arch/x86/xen/p2m.c | 10
-rw-r--r--  arch/x86/xen/smp.c | 28
-rw-r--r--  arch/x86/xen/spinlock.c | 45
-rw-r--r--  arch/xtensa/kernel/setup.c | 3
-rw-r--r--  block/Kconfig | 6
-rw-r--r--  block/Makefile | 1
-rw-r--r--  block/blk-ioc.c | 2
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  block/cmdline-parser.c | 250
-rw-r--r--  block/compat_ioctl.c | 2
-rw-r--r--  block/partitions/Kconfig | 7
-rw-r--r--  block/partitions/Makefile | 1
-rw-r--r--  block/partitions/check.c | 4
-rw-r--r--  block/partitions/cmdline.c | 99
-rw-r--r--  block/partitions/cmdline.h | 2
-rw-r--r--  block/partitions/efi.c | 171
-rw-r--r--  block/partitions/efi.h | 38
-rw-r--r--  drivers/base/dma-buf.c | 32
-rw-r--r--  drivers/block/aoe/aoe.h | 4
-rw-r--r--  drivers/block/aoe/aoeblk.c | 100
-rw-r--r--  drivers/block/aoe/aoecmd.c | 4
-rw-r--r--  drivers/block/aoe/aoedev.c | 10
-rw-r--r--  drivers/block/cciss.c | 7
-rw-r--r--  drivers/block/mg_disk.c | 2
-rw-r--r--  drivers/block/osdblk.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 278
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/swim.c | 2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 60
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/acpi-dma.c | 4
-rw-r--r--  drivers/dma/amba-pl08x.c | 501
-rw-r--r--  drivers/dma/dmaengine.c | 28
-rw-r--r--  drivers/dma/dw/core.c | 39
-rw-r--r--  drivers/dma/dw/platform.c | 1
-rw-r--r--  drivers/dma/edma.c | 164
-rw-r--r--  drivers/dma/ep93xx_dma.c | 10
-rw-r--r--  drivers/dma/fsldma.c | 10
-rw-r--r--  drivers/dma/imx-dma.c | 6
-rw-r--r--  drivers/dma/imx-sdma.c | 179
-rw-r--r--  drivers/dma/iop-adma.c | 6
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 837
-rw-r--r--  drivers/dma/mmp_pdma.c | 320
-rw-r--r--  drivers/dma/mmp_tdma.c | 6
-rw-r--r--  drivers/dma/mpc512x_dma.c | 10
-rw-r--r--  drivers/dma/mv_xor.c | 4
-rw-r--r--  drivers/dma/mxs-dma.c | 27
-rw-r--r--  drivers/dma/of-dma.c | 3
-rw-r--r--  drivers/dma/pch_dma.c | 10
-rw-r--r--  drivers/dma/pl330.c | 179
-rw-r--r--  drivers/dma/sh/Kconfig | 10
-rw-r--r--  drivers/dma/sh/Makefile | 6
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 655
-rw-r--r--  drivers/dma/sh/shdma-arm.h | 51
-rw-r--r--  drivers/dma/sh/shdma-base.c | 26
-rw-r--r--  drivers/dma/sh/shdma-of.c | 5
-rw-r--r--  drivers/dma/sh/shdma-r8a73a4.c | 77
-rw-r--r--  drivers/dma/sh/shdma.h | 16
-rw-r--r--  drivers/dma/sh/shdmac.c (renamed from drivers/dma/sh/shdma.c) | 160
-rw-r--r--  drivers/dma/sh/sudmac.c | 22
-rw-r--r--  drivers/dma/sirf-dma.c | 134
-rw-r--r--  drivers/dma/ste_dma40.c | 23
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 8
-rw-r--r--  drivers/dma/timb_dma.c | 2
-rw-r--r--  drivers/dma/txx9dmac.c | 22
-rw-r--r--  drivers/firmware/dmi_scan.c | 73
-rw-r--r--  drivers/firmware/google/gsmi.c | 2
-rw-r--r--  drivers/gpio/gpiolib-of.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 164
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 9
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 51
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/ina2xx.c | 3
-rw-r--r--  drivers/iommu/msm_iommu_dev.c | 2
-rw-r--r--  drivers/iommu/omap-iommu.c | 2
-rw-r--r--  drivers/md/Makefile | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 59
-rw-r--r--  drivers/md/dm-crypt.c | 10
-rw-r--r--  drivers/md/dm-ioctl.c | 60
-rw-r--r--  drivers/md/dm-kcopyd.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 3
-rw-r--r--  drivers/md/dm-stats.c | 969
-rw-r--r--  drivers/md/dm-stats.h | 40
-rw-r--r--  drivers/md/dm-stripe.c | 1
-rw-r--r--  drivers/md/dm-table.c | 20
-rw-r--r--  drivers/md/dm-target.c | 9
-rw-r--r--  drivers/md/dm-thin.c | 122
-rw-r--r--  drivers/md/dm.c | 70
-rw-r--r--  drivers/md/dm.h | 27
-rw-r--r--  drivers/md/md.c | 54
-rw-r--r--  drivers/md/md.h | 8
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c | 5
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.h | 5
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 28
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 77
-rw-r--r--  drivers/md/raid5.c | 362
-rw-r--r--  drivers/md/raid5.h | 22
-rw-r--r--  drivers/memstick/core/Kconfig | 12
-rw-r--r--  drivers/memstick/core/Makefile | 2
-rw-r--r--  drivers/memstick/core/ms_block.c | 2385
-rw-r--r--  drivers/memstick/core/ms_block.h | 290
-rw-r--r--  drivers/memstick/host/rtsx_pci_ms.c | 2
-rw-r--r--  drivers/mmc/card/block.c | 47
-rw-r--r--  drivers/mmc/card/mmc_test.c | 14
-rw-r--r--  drivers/mmc/core/core.c | 44
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 1
-rw-r--r--  drivers/mmc/core/sd.c | 13
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 14
-rw-r--r--  drivers/mmc/host/Kconfig | 10
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 34
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c | 9
-rw-r--r--  drivers/mmc/host/dw_mmc-pci.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c | 1
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 21
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 7
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 49
-rw-r--r--  drivers/mmc/host/mvsdio.c | 3
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 22
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c | 46
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-bcm2835.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-sirf.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 7
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 63
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c | 23
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c | 4
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c | 24
-rw-r--r--  drivers/mmc/host/vub300.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 31
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 38
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 3
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 558
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 983
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 112
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2076
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h | 59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 2041
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2076
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 131
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1449
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 366
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 245
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 1006
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 169
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 7375
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 391
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 239
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 4688
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 101
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1817
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 259
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1154
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 368
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2335
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 120
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 6
-rw-r--r--  drivers/net/irda/donauboe.c | 6
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 2
-rw-r--r--  drivers/net/macvlan.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 130
-rw-r--r--  drivers/of/base.c | 200
-rw-r--r--  drivers/of/fdt.c | 98
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/of/of_net.c | 2
-rw-r--r--  drivers/of/platform.c | 21
-rw-r--r--  drivers/platform/x86/apple-gmux.c | 18
-rw-r--r--  drivers/pnp/driver.c | 13
-rw-r--r--  drivers/power/Kconfig | 15
-rw-r--r--  drivers/power/Makefile | 2
-rw-r--r--  drivers/power/ab8500_charger.c | 1
-rw-r--r--  drivers/power/bq24190_charger.c | 1549
-rw-r--r--  drivers/power/collie_battery.c | 2
-rw-r--r--  drivers/power/max8925_power.c | 1
-rw-r--r--  drivers/power/power_supply_core.c | 38
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/reset/Kconfig | 15
-rw-r--r--  drivers/power/reset/Makefile | 2
-rw-r--r--  drivers/power/reset/msm-poweroff.c | 73
-rw-r--r--  drivers/power/reset/xgene-reboot.c | 103
-rw-r--r--  drivers/power/rx51_battery.c | 14
-rw-r--r--  drivers/power/tosa_battery.c | 2
-rw-r--r--  drivers/power/twl4030_charger.c | 7
-rw-r--r--  drivers/power/twl4030_madc_battery.c | 245
-rw-r--r--  drivers/pps/clients/pps-gpio.c | 1
-rw-r--r--  drivers/rtc/Kconfig | 9
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-cmos.c | 24
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 17
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 13
-rw-r--r--  drivers/rtc/rtc-ds1742.c | 26
-rw-r--r--  drivers/rtc/rtc-ep93xx.c | 14
-rw-r--r--  drivers/rtc/rtc-hid-sensor-time.c | 22
-rw-r--r--  drivers/rtc/rtc-imxdi.c | 16
-rw-r--r--  drivers/rtc/rtc-lpc32xx.c | 24
-rw-r--r--  drivers/rtc/rtc-max77686.c | 4
-rw-r--r--  drivers/rtc/rtc-moxart.c | 330
-rw-r--r--  drivers/rtc/rtc-mv.c | 17
-rw-r--r--  drivers/rtc/rtc-mxc.c | 14
-rw-r--r--  drivers/rtc/rtc-nuc900.c | 2
-rw-r--r--  drivers/rtc/rtc-omap.c | 60
-rw-r--r--  drivers/rtc/rtc-palmas.c | 35
-rw-r--r--  drivers/rtc/rtc-pcf2127.c | 6
-rw-r--r--  drivers/rtc/rtc-sirfsoc.c | 13
-rw-r--r--  drivers/rtc/rtc-stk17ta8.c | 15
-rw-r--r--  drivers/rtc/rtc-tx4939.c | 14
-rw-r--r--  drivers/s390/block/dasd_diag.c | 4
-rw-r--r--  drivers/s390/char/fs3270.c | 6
-rw-r--r--  drivers/s390/char/sclp.c | 6
-rw-r--r--  drivers/s390/char/tty3270.c | 6
-rw-r--r--  drivers/s390/char/zcore.c | 6
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 2
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 2
-rw-r--r--  drivers/video/acornfb.c | 266
-rw-r--r--  drivers/video/acornfb.h | 29
-rw-r--r--  drivers/video/ps3fb.c | 2
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 2
-rw-r--r--  drivers/w1/w1.c | 12
-rw-r--r--  drivers/watchdog/hpwdt.c | 6
-rw-r--r--  fs/9p/vfs_file.c | 4
-rw-r--r--  fs/9p/vfs_inode.c | 4
-rw-r--r--  fs/affs/file.c | 2
-rw-r--r--  fs/afs/dir.c | 24
-rw-r--r--  fs/autofs4/dev-ioctl.c | 23
-rw-r--r--  fs/bio-integrity.c | 9
-rw-r--r--  fs/coredump.c | 5
-rw-r--r--  fs/dcache.c | 220
-rw-r--r--  fs/eventpoll.c | 2
-rw-r--r--  fs/exec.c | 122
-rw-r--r--  fs/exportfs/expfs.c | 2
-rw-r--r--  fs/file_table.c | 3
-rw-r--r--  fs/fs-writeback.c | 12
-rw-r--r--  fs/fscache/page.c | 2
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/hfsplus/Kconfig | 18
-rw-r--r--  fs/hfsplus/Makefile | 2
-rw-r--r--  fs/hfsplus/acl.h | 30
-rw-r--r--  fs/hfsplus/dir.c | 4
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r--  fs/hfsplus/inode.c | 11
-rw-r--r--  fs/hfsplus/posix_acl.c | 274
-rw-r--r--  fs/hfsplus/xattr.c | 62
-rw-r--r--  fs/hfsplus/xattr.h | 33
-rw-r--r--  fs/hfsplus/xattr_security.c | 13
-rw-r--r--  fs/internal.h | 3
-rw-r--r--  fs/namei.c | 117
-rw-r--r--  fs/namespace.c | 4
-rw-r--r--  fs/nfsd/nfs4recover.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 33
-rw-r--r--  fs/ocfs2/acl.c | 4
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 32
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 60
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 18
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 15
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 35
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 18
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 13
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 19
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 4
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 3
-rw-r--r--  fs/ocfs2/extent_map.c | 11
-rw-r--r--  fs/ocfs2/file.c | 7
-rw-r--r--  fs/ocfs2/ioctl.c | 2
-rw-r--r--  fs/ocfs2/journal.c | 43
-rw-r--r--  fs/ocfs2/journal.h | 11
-rw-r--r--  fs/ocfs2/localalloc.c | 4
-rw-r--r--  fs/ocfs2/move_extents.c | 3
-rw-r--r--  fs/ocfs2/ocfs2_trace.h | 2
-rw-r--r--  fs/ocfs2/quota_global.c | 6
-rw-r--r--  fs/ocfs2/quota_local.c | 12
-rw-r--r--  fs/ocfs2/refcounttree.c | 10
-rw-r--r--  fs/ocfs2/xattr.c | 11
-rw-r--r--  fs/proc/fd.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 50
-rw-r--r--  fs/proc/vmcore.c | 154
-rw-r--r--  fs/ramfs/inode.c | 26
-rw-r--r--  fs/squashfs/block.c | 11
-rw-r--r--  fs/squashfs/dir.c | 17
-rw-r--r--  fs/squashfs/namei.c | 8
-rw-r--r--  fs/squashfs/squashfs_fs.h | 5
-rw-r--r--  fs/super.c | 2
-rw-r--r--  include/drm/i915_drm.h | 34
-rw-r--r--  include/drm/i915_pciids.h | 211
-rw-r--r--  include/dt-bindings/input/input.h | 525
-rw-r--r--  include/linux/amba/pl080.h | 1
-rw-r--r--  include/linux/backing-dev.h | 3
-rw-r--r--  include/linux/binfmts.h | 2
-rw-r--r--  include/linux/cmdline-parser.h | 43
-rw-r--r--  include/linux/compat.h | 1
-rw-r--r--  include/linux/crash_dump.h | 9
-rw-r--r--  include/linux/device-mapper.h | 9
-rw-r--r--  include/linux/dma/mmp-pdma.h | 15
-rw-r--r--  include/linux/dmaengine.h | 37
-rw-r--r--  include/linux/eventfd.h | 3
-rw-r--r--  include/linux/fsl/mxs-dma.h | 20
-rw-r--r--  include/linux/genalloc.h | 4
-rw-r--r--  include/linux/hugetlb.h | 25
-rw-r--r--  include/linux/init.h | 1
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/kprobes.h | 34
-rw-r--r--  include/linux/lz4.h | 8
-rw-r--r--  include/linux/math64.h | 13
-rw-r--r--  include/linux/memblock.h | 2
-rw-r--r--  include/linux/mempolicy.h | 11
-rw-r--r--  include/linux/mfd/tmio.h | 1
-rw-r--r--  include/linux/migrate.h | 5
-rw-r--r--  include/linux/mm.h | 32
-rw-r--r--  include/linux/mm_inline.h | 1
-rw-r--r--  include/linux/mmc/core.h | 2
-rw-r--r--  include/linux/mmc/sdhci.h | 1
-rw-r--r--  include/linux/mmc/sh_mmcif.h | 6
-rw-r--r--  include/linux/mmc/sh_mobile_sdhi.h | 2
-rw-r--r--  include/linux/mmc/slot-gpio.h | 3
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/namei.h | 4
-rw-r--r--  include/linux/of.h | 49
-rw-r--r--  include/linux/of_fdt.h | 3
-rw-r--r--  include/linux/of_net.h | 4
-rw-r--r--  include/linux/platform_data/dma-rcar-hpbdma.h | 103
-rw-r--r--  include/linux/platform_data/edma.h | 2
-rw-r--r--  include/linux/power/bq24190_charger.h | 16
-rw-r--r--  include/linux/power/twl4030_madc_battery.h | 39
-rw-r--r--  include/linux/power_supply.h | 3
-rw-r--r--  include/linux/radix-tree.h | 1
-rw-r--r--  include/linux/raid/pq.h | 1
-rw-r--r--  include/linux/ramfs.h | 2
-rw-r--r--  include/linux/rbtree.h | 22
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/sh_dma.h | 55
-rw-r--r--  include/linux/shdma-base.h | 3
-rw-r--r--  include/linux/smp.h | 79
-rw-r--r--  include/linux/spi/mmc_spi.h | 19
-rw-r--r--  include/linux/sunrpc/cache.h | 22
-rw-r--r--  include/linux/sunrpc/svc.h | 1
-rw-r--r--  include/linux/swap.h | 52
-rw-r--r--  include/linux/syscalls.h | 1
-rw-r--r--  include/linux/vgaarb.h | 7
-rw-r--r--  include/linux/vm_event_item.h | 6
-rw-r--r--  include/linux/vmstat.h | 4
-rw-r--r--  include/linux/writeback.h | 2
-rw-r--r--  include/net/9p/client.h | 5
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/trace/events/kmem.h | 10
-rw-r--r--  include/uapi/linux/dm-ioctl.h | 4
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  init/do_mounts.c | 45
-rw-r--r--  ipc/msg.c | 25
-rw-r--r--  ipc/namespace.c | 7
-rw-r--r--  ipc/sem.c | 24
-rw-r--r--  ipc/shm.c | 255
-rw-r--r--  ipc/util.c | 82
-rw-r--r--  ipc/util.h | 14
-rw-r--r--  kernel/cgroup.c | 33
-rw-r--r--  kernel/extable.c | 2
-rw-r--r--  kernel/fork.c | 33
-rw-r--r--  kernel/kexec.c | 5
-rw-r--r--  kernel/kprobes.c | 95
-rw-r--r--  kernel/modsign_pubkey.c | 6
-rw-r--r--  kernel/panic.c | 8
-rw-r--r--  kernel/power/snapshot.c | 12
-rw-r--r--  kernel/ptrace.c | 2
-rw-r--r--  kernel/signal.c | 4
-rw-r--r--  kernel/smp.c | 16
-rw-r--r--  kernel/spinlock.c | 14
-rw-r--r--  kernel/sysctl.c | 2
-rw-r--r--  kernel/task_work.c | 40
-rw-r--r--  kernel/up.c | 58
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  lib/crc32.c | 17
-rw-r--r--  lib/decompress_inflate.c | 2
-rw-r--r--  lib/div64.c | 40
-rw-r--r--  lib/genalloc.c | 22
-rw-r--r--  lib/lz4/lz4_decompress.c | 8
-rw-r--r--  lib/radix-tree.c | 41
-rw-r--r--  lib/raid6/Makefile | 6
-rw-r--r--  lib/raid6/algos.c | 3
-rw-r--r--  lib/raid6/test/Makefile | 9
-rw-r--r--  lib/raid6/tilegx.uc | 86
-rw-r--r--  lib/rbtree.c | 40
-rw-r--r--  lib/rbtree_test.c | 12
-rw-r--r--  mm/backing-dev.c | 2
-rw-r--r--  mm/compaction.c | 3
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/huge_memory.c | 10
-rw-r--r--  mm/hugetlb.c | 447
-rw-r--r--  mm/hwpoison-inject.c | 4
-rw-r--r--  mm/internal.h | 2
-rw-r--r--  mm/kmemleak.c | 2
-rw-r--r--  mm/ksm.c | 6
-rw-r--r--  mm/madvise.c | 33
-rw-r--r--  mm/memblock.c | 18
-rw-r--r--  mm/memcontrol.c | 17
-rw-r--r--  mm/memory-failure.c | 174
-rw-r--r--  mm/memory.c | 41
-rw-r--r--  mm/memory_hotplug.c | 112
-rw-r--r--  mm/mempolicy.c | 116
-rw-r--r--  mm/mempool.c | 2
-rw-r--r--  mm/migrate.c | 63
-rw-r--r--  mm/mlock.c | 316
-rw-r--r--  mm/mmap.c | 59
-rw-r--r--  mm/mremap.c | 5
-rw-r--r--  mm/page-writeback.c | 269
-rw-r--r--  mm/page_alloc.c | 308
-rw-r--r--  mm/page_isolation.c | 14
-rw-r--r--  mm/pgtable-generic.c | 24
-rw-r--r--  mm/readahead.c | 8
-rw-r--r--  mm/shmem.c | 6
-rw-r--r--  mm/slub.c | 8
-rw-r--r--  mm/sparse.c | 133
-rw-r--r--  mm/swap.c | 77
-rw-r--r--  mm/swap_state.c | 4
-rw-r--r--  mm/swapfile.c | 596
-rw-r--r--  mm/util.c | 5
-rw-r--r--  mm/vmalloc.c | 29
-rw-r--r--  mm/vmscan.c | 80
-rw-r--r--  mm/vmstat.c | 95
-rw-r--r--  mm/zbud.c | 4
-rw-r--r--  mm/zswap.c | 18
-rw-r--r--  net/9p/client.c | 5
-rw-r--r--  net/9p/trans_virtio.c | 5
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/exthdrs.c | 6
-rw-r--r--  net/ipv6/fib6_rules.c | 4
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 18
-rw-r--r--  net/openvswitch/flow.c | 1
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sctp/socket.c | 5
-rw-r--r--  net/socket.c | 50
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_upcall.c | 26
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 41
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.h | 5
-rwxr-xr-x  scripts/checkpatch.pl | 345
-rwxr-xr-x  scripts/config | 44
-rwxr-xr-x  scripts/diffconfig | 33
-rw-r--r--  scripts/kconfig/confdata.c | 11
-rw-r--r--  scripts/kconfig/mconf.c | 4
-rw-r--r--  scripts/kconfig/menu.c | 11
-rw-r--r--  scripts/kconfig/nconf.c | 4
-rw-r--r--  scripts/kconfig/symbol.c | 68
-rw-r--r--  scripts/kconfig/zconf.tab.c_shipped | 562
-rw-r--r--  scripts/kconfig/zconf.y | 11
592 files changed, 50008 insertions, 6127 deletions
diff --git a/Documentation/aoe/udev.txt b/Documentation/aoe/udev.txt
index 8686e789542..1f06daf03f5 100644
--- a/Documentation/aoe/udev.txt
+++ b/Documentation/aoe/udev.txt
@@ -23,4 +23,4 @@ SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="02
SUBSYSTEM=="aoe", KERNEL=="flush", NAME="etherd/%k", GROUP="disk", MODE="0220"
# aoe block devices
-KERNEL=="etherd*", NAME="%k", GROUP="disk"
+KERNEL=="etherd*", GROUP="disk"
diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt
new file mode 100644
index 00000000000..2bbf4cc40c3
--- /dev/null
+++ b/Documentation/block/cmdline-partition.txt
@@ -0,0 +1,39 @@
+Embedded device command line partition
+=====================================================================
+
+Read the block device partition table from the kernel command line.
+This is typically used for the fixed block devices (eMMC) of embedded
+devices. There is no MBR, which saves storage space, and the
+bootloader can access data on the block device by absolute address.
+Users can easily change the partitioning.
+
+The format for the command line is just like mtdparts:
+
+blkdevparts=<blkdev-def>[;<blkdev-def>]
+ <blkdev-def> := <blkdev-id>:<partdef>[,<partdef>]
+ <partdef> := <size>[@<offset>](part-name)
+
+<blkdev-id>
+ block device disk name. Embedded devices use fixed block devices,
+ so the disk name is also fixed, such as: mmcblk0, mmcblk1, mmcblk0boot0.
+
+<size>
+ partition size, in bytes, such as: 512, 1m, 1G.
+
+<offset>
+ partition start address, in bytes.
+
+(part-name)
+ partition name. The kernel sends a uevent with "PARTNAME". An
+ application can create a link to the block device partition with the
+ name "PARTNAME", so user space applications can access the partition
+ by name.
+
+Example:
+ eMMC disk name is "mmcblk0" and "mmcblk0boot0"
+
+ bootargs:
+ 'blkdevparts=mmcblk0:1G(data0),1G(data1),-;mmcblk0boot0:1m(boot),-(kernel)'
+
+ dmesg:
+ mmcblk0: p1(data0) p2(data1) p3()
+ mmcblk0boot0: p1(boot) p2(kernel)
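+
+For illustration, a minimal userspace sketch of how a single <partdef>
+like "1G@0(data0)" could be parsed. This is NOT the kernel's parser
+(the in-kernel implementation lives in block/cmdline-parser.c and
+differs in detail; the "-" size meaning "rest of the device" is not
+handled here):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  static unsigned long long parse_size(const char *s, char **end)
+  {
+          unsigned long long v = strtoull(s, end, 10);
+
+          switch (**end) {
+          case 'g': case 'G': v <<= 30; (*end)++; break;
+          case 'm': case 'M': v <<= 20; (*end)++; break;
+          case 'k': case 'K': v <<= 10; (*end)++; break;
+          }
+          return v;
+  }
+
+  int main(void)
+  {
+          const char *partdef = "1G@0(data0)";
+          char *p, name[32] = "";
+          unsigned long long size, offset = 0;
+
+          size = parse_size(partdef, &p);
+          if (*p == '@')                        /* optional offset */
+                  offset = parse_size(p + 1, &p);
+          if (*p == '(')                        /* optional name */
+                  sscanf(p, "(%31[^)])", name);
+          printf("name=%s size=%llu offset=%llu\n", name, size, offset);
+          return 0;
+  }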
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e8cdf7241b6..33d45ee0b73 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -50,14 +50,16 @@ other parameters detailed later):
which are dirty, and extra hints for use by the policy object.
This information could be put on the cache device, but having it
separate allows the volume manager to configure it differently,
- e.g. as a mirror for extra robustness.
+ e.g. as a mirror for extra robustness. This metadata device may only
+ be used by a single cache device.
Fixed block size
----------------
The origin is divided up into blocks of a fixed size. This block size
is configurable when you first create the cache. Typically we've been
-using block sizes of 256k - 1024k.
+using block sizes of 256KB - 1024KB. The block size must be between 64
+(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
Having a fixed block size simplifies the target a lot. But it is
something of a compromise. For instance, a small part of a block may be
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
new file mode 100644
index 00000000000..2a1673adc20
--- /dev/null
+++ b/Documentation/device-mapper/statistics.txt
@@ -0,0 +1,186 @@
+DM statistics
+=============
+
+Device Mapper supports the collection of I/O statistics on user-defined
+regions of a DM device. If no regions are defined no statistics are
+collected so there isn't any performance impact. Only bio-based DM
+devices are currently supported.
+
+Each user-defined region specifies a starting sector, length and step.
+Individual statistics will be collected for each step-sized area within
+the range specified.
+
+The I/O statistics counters for each step-sized area of a region are
+in the same format as /sys/block/*/stat or /proc/diskstats (see:
+Documentation/iostats.txt). But two extra counters (12 and 13) are
+provided: total time spent reading and writing in milliseconds. All
+these counters may be accessed by sending the @stats_print message to
+the appropriate DM device via dmsetup.
+
+Each region has a corresponding unique identifier, which we call a
+region_id, that is assigned when the region is created. The region_id
+must be supplied when querying statistics about the region, deleting the
+region, etc. Unique region_ids enable multiple userspace programs to
+request and process statistics for the same DM device without stepping
+on each other's data.
+
+The creation of DM statistics will allocate memory via kmalloc or
+fall back to using vmalloc space. At most, 1/4 of the overall system
+memory may be allocated by DM statistics. The admin can see how much
+memory is used by reading
+/sys/module/dm_mod/parameters/stats_current_allocated_bytes
+
+Messages
+========
+
+ @stats_create <range> <step> [<program_id> [<aux_data>]]
+
+ Create a new region and return the region_id.
+
+ <range>
+ "-" - whole device
+ "<start_sector>+<length>" - a range of <length> 512-byte sectors
+ starting with <start_sector>.
+
+ <step>
+ "<area_size>" - the range is subdivided into areas each containing
+ <area_size> sectors.
+ "/<number_of_areas>" - the range is subdivided into the specified
+ number of areas.
+
+ <program_id>
+ An optional parameter. A name that uniquely identifies
+ the userspace owner of the range. This groups ranges together
+ so that userspace programs can identify the ranges they
+ created and ignore those created by others.
+ The kernel returns this string back in the output of
+ @stats_list message, but it doesn't use it for anything else.
+
+ <aux_data>
+ An optional parameter. A word that provides auxiliary data
+ that is useful to the client program that created the range.
+ The kernel returns this string back in the output of
+ @stats_list message, but it doesn't use this value for anything.
+
+ @stats_delete <region_id>
+
+ Delete the region with the specified id.
+
+ <region_id>
+ region_id returned from @stats_create
+
+ @stats_clear <region_id>
+
+ Clear all the counters except the in-flight i/o counters.
+
+ <region_id>
+ region_id returned from @stats_create
+
+ @stats_list [<program_id>]
+
+ List all regions registered with @stats_create.
+
+ <program_id>
+ An optional parameter.
+ If this parameter is specified, only matching regions
+ are returned.
+ If it is not specified, all regions are returned.
+
+ Output format:
+ <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+
+ @stats_print <region_id> [<starting_line> <number_of_lines>]
+
+ Print counters for each step-sized area of a region.
+
+ <region_id>
+ region_id returned from @stats_create
+
+ <starting_line>
+ The index of the starting line in the output.
+ If omitted, all lines are returned.
+
+ <number_of_lines>
+ The number of lines to include in the output.
+ If omitted, all lines are returned.
+
+ Output format for each step-sized area of a region:
+
+ <start_sector>+<length> counters
+
+ The first 11 counters have the same meaning as
+ /sys/block/*/stat or /proc/diskstats.
+
+ Please refer to Documentation/iostats.txt for details.
+
+ 1. the number of reads completed
+ 2. the number of reads merged
+ 3. the number of sectors read
+ 4. the number of milliseconds spent reading
+ 5. the number of writes completed
+ 6. the number of writes merged
+ 7. the number of sectors written
+ 8. the number of milliseconds spent writing
+ 9. the number of I/Os currently in progress
+ 10. the number of milliseconds spent doing I/Os
+ 11. the weighted number of milliseconds spent doing I/Os
+
+ Additional counters:
+ 12. the total time spent reading in milliseconds
+ 13. the total time spent writing in milliseconds
+
+ @stats_print_clear <region_id> [<starting_line> <number_of_lines>]
+
+ Atomically print and then clear all the counters except the
+ in-flight i/o counters. Useful when the client consuming the
+ statistics does not want to lose any statistics (those updated
+ between printing and clearing).
+
+ <region_id>
+ region_id returned from @stats_create
+
+ <starting_line>
+ The index of the starting line in the output.
+ If omitted, all lines are printed and then cleared.
+
+ <number_of_lines>
+ The number of lines to process.
+ If omitted, all lines are printed and then cleared.
+
+ @stats_set_aux <region_id> <aux_data>
+
+ Store auxiliary data aux_data for the specified region.
+
+ <region_id>
+ region_id returned from @stats_create
+
+ <aux_data>
+ The string that identifies data which is useful to the client
+ program that created the range. The kernel returns this
+ string back in the output of @stats_list message, but it
+ doesn't use this value for anything.
+
+Examples
+========
+
+Subdivide the DM device 'vol' into 100 pieces and start collecting
+statistics on them:
+
+ dmsetup message vol 0 @stats_create - /100
+
+Set the auxiliary data string to "foo bar baz" (the escape for each
+space must also be escaped, otherwise the shell will consume them):
+
+ dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz
+
+List the statistics:
+
+ dmsetup message vol 0 @stats_list
+
+Print the statistics:
+
+ dmsetup message vol 0 @stats_print 0
+
+Delete the statistics:
+
+ dmsetup message vol 0 @stats_delete 0
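+
+As an aside, here is a minimal C sketch (not part of the kernel tree)
+of how a client might pick apart one line of @stats_print output; the
+sample line and its numbers are made up:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char *line = "0+1000 5 0 40 10 5 0 40 20 0 30 30 10 20";
+          unsigned long long start, len, c[13];
+          int n;
+
+          n = sscanf(line, "%llu+%llu %llu %llu %llu %llu %llu %llu %llu"
+                     " %llu %llu %llu %llu %llu %llu", &start, &len,
+                     &c[0], &c[1], &c[2], &c[3], &c[4], &c[5], &c[6],
+                     &c[7], &c[8], &c[9], &c[10], &c[11], &c[12]);
+          if (n == 15)    /* counters 1, 5, 12 and 13 from the list above */
+                  printf("reads=%llu writes=%llu read_ms=%llu write_ms=%llu\n",
+                         c[0], c[4], c[11], c[12]);
+          return 0;
+  }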
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 30b8b83bd33..50c44cf79b0 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -99,13 +99,14 @@ Using an existing pool device
$data_block_size $low_water_mark"
$data_block_size gives the smallest unit of disk space that can be
-allocated at a time expressed in units of 512-byte sectors. People
-primarily interested in thin provisioning may want to use a value such
-as 1024 (512KB). People doing lots of snapshotting may want a smaller value
-such as 128 (64KB). If you are not zeroing newly-allocated data,
-a larger $data_block_size in the region of 256000 (128MB) is suggested.
-$data_block_size must be the same for the lifetime of the
-metadata device.
+allocated at a time expressed in units of 512-byte sectors.
+$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
+multiple of 128 (64KB). $data_block_size cannot be changed after the
+thin-pool is created. People primarily interested in thin provisioning
+may want to use a value such as 1024 (512KB). People doing lots of
+snapshotting may want a smaller value such as 128 (64KB). If you are
+not zeroing newly-allocated data, a larger $data_block_size in the
+region of 256000 (128MB) is suggested.
$low_water_mark is expressed in blocks of size $data_block_size. If
free space on the data device drops below this level then a dm event
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 68cee4f5539..4fa814d3832 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -1,7 +1,12 @@
* Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
Required properties:
-- compatible : Should be "fsl,<chip>-sdma"
+- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
+ "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
+ "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
+ "fsl,imx6q-sdma". The -to variants should be preferred since they
+ make it possible to determine the correct ROM script addresses
+ needed for the driver to work without additional firmware.
- reg : Should contain SDMA registers location and length
- interrupts : Should contain SDMA interrupt
- #dma-cells : Must be <3>.
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
new file mode 100644
index 00000000000..23f8d712c3c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -0,0 +1,46 @@
+* Hisilicon K3 DMA controller
+
+See dma.txt first
+
+Required properties:
+- compatible: Should be "hisilicon,k3-dma-1.0"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: see dma.txt, should be 1 (the cell is the request
+ line number)
+- dma-channels: physical channels supported
+- dma-requests: virtual channels supported; each virtual channel
+ has a specific request line
+- clocks: clock required
+
+Example:
+
+Controller:
+ dma0: dma@fcd02000 {
+ compatible = "hisilicon,k3-dma-1.0";
+ reg = <0xfcd02000 0x1000>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ dma-requests = <27>;
+ interrupts = <0 12 4>;
+ clocks = <&pclk>;
+ status = "disabled";
+ };
+
+Client:
+Use the specific request line passed from dmax.
+For example, the i2c0 read channel request line is 18, while the write
+channel uses 19.
+
+ i2c0: i2c@fcb08000 {
+ compatible = "snps,designware-i2c";
+ dmas = <&dma0 18 /* read channel */
+ &dma0 19>; /* write channel */
+ dma-names = "rx", "tx";
+ };
+
+ i2c1: i2c@fcb09000 {
+ compatible = "snps,designware-i2c";
+ dmas = <&dma0 20 /* read channel */
+ &dma0 21>; /* write channel */
+ dma-names = "rx", "tx";
+ };
+
diff --git a/Documentation/devicetree/bindings/dma/shdma.txt b/Documentation/devicetree/bindings/dma/shdma.txt
index c15994aa193..2a3f3b8946b 100644
--- a/Documentation/devicetree/bindings/dma/shdma.txt
+++ b/Documentation/devicetree/bindings/dma/shdma.txt
@@ -22,42 +22,51 @@ Optional properties (currently unused):
* DMA controller
Required properties:
-- compatible: should be "renesas,shdma"
+- compatible: should be of the form "renesas,shdma-<soc>", where <soc> should
+ be replaced with the desired SoC model, e.g.
+ "renesas,shdma-r8a73a4" for the system DMAC on r8a73a4 SoC
Example:
- dmac: dma-mux0 {
+ dmac: dma-multiplexer@0 {
compatible = "renesas,shdma-mux";
#dma-cells = <1>;
- dma-channels = <6>;
+ dma-channels = <20>;
dma-requests = <256>;
- reg = <0 0>; /* Needed for AUXDATA */
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
- dma0: shdma@fe008020 {
- compatible = "renesas,shdma";
- reg = <0xfe008020 0x270>,
- <0xfe009000 0xc>;
+ dma0: dma-controller@e6700020 {
+ compatible = "renesas,shdma-r8a73a4";
+ reg = <0 0xe6700020 0 0x89e0>;
interrupt-parent = <&gic>;
- interrupts = <0 34 4
- 0 28 4
- 0 29 4
- 0 30 4
- 0 31 4
- 0 32 4
- 0 33 4>;
+ interrupts = <0 220 4
+ 0 200 4
+ 0 201 4
+ 0 202 4
+ 0 203 4
+ 0 204 4
+ 0 205 4
+ 0 206 4
+ 0 207 4
+ 0 208 4
+ 0 209 4
+ 0 210 4
+ 0 211 4
+ 0 212 4
+ 0 213 4
+ 0 214 4
+ 0 215 4
+ 0 216 4
+ 0 217 4
+ 0 218 4
+ 0 219 4>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
- "ch4", "ch5";
- };
-
- dma1: shdma@fe018020 {
- ...
- };
-
- dma2: shdma@fe028020 {
- ...
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19";
};
};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index bd9be0b5bc2..b7943f3f999 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -19,6 +19,9 @@ Optional properties:
"bus-width = <1>" property.
- sdhci,auto-cmd12: specifies that a controller can only handle auto
CMD12.
+ - voltage-ranges : two cells are required, first cell specifies minimum
+ slot voltage (mV), second cell specifies maximum slot voltage (mV).
+ Several ranges could be specified.
Example:
@@ -29,4 +32,5 @@ sdhci@2e000 {
interrupt-parent = <&ipic>;
/* Filled in by U-Boot */
clock-frequency = <0>;
+ voltage-ranges = <3300 3300>;
};
diff --git a/Documentation/devicetree/bindings/net/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
index c2dbcec0ee3..f2105a47ec8 100644
--- a/Documentation/devicetree/bindings/net/can/sja1000.txt
+++ b/Documentation/devicetree/bindings/net/can/sja1000.txt
@@ -37,7 +37,7 @@ Optional properties:
If not specified or if the specified value is 0, the CLKOUT pin
will be disabled.
-- nxp,no-comparator-bypass : Allows to disable the CAN input comperator.
+- nxp,no-comparator-bypass : Allows disabling the CAN input comparator.
For further information, please have a look to the SJA1000 data sheet.
diff --git a/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt b/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
new file mode 100644
index 00000000000..ce44ad35756
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
@@ -0,0 +1,17 @@
+MSM Restart Driver
+
+A power supply hold (ps-hold) bit is set to power the MSM chipsets.
+Clearing that bit allows us to restart or power off. The difference
+between poweroff and restart is determined by unique power manager IC
+settings.
+
+Required Properties:
+-compatible: "qcom,pshold"
+-reg: Specifies the physical address of the ps-hold register
+
+Example:
+
+ restart@fc4ab000 {
+ compatible = "qcom,pshold";
+ reg = <0xfc4ab000 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt b/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt
new file mode 100644
index 00000000000..c9d3ac1477f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/moxa,moxart-rtc.txt
@@ -0,0 +1,17 @@
+MOXA ART real-time clock
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-rtc"
+- gpio-rtc-sclk : RTC sclk gpio, with zero flags
+- gpio-rtc-data : RTC data gpio, with zero flags
+- gpio-rtc-reset : RTC reset gpio, with zero flags
+
+Example:
+
+ rtc: rtc {
+ compatible = "moxa,moxart-rtc";
+ gpio-rtc-sclk = <&gpio 5 0>;
+ gpio-rtc-data = <&gpio 6 0>;
+ gpio-rtc-reset = <&gpio 7 0>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index b47aa415c82..5a0f02d34d9 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -1,7 +1,11 @@
TI Real Time Clock
Required properties:
-- compatible: "ti,da830-rtc"
+- compatible:
+ - "ti,da830-rtc" - for the RTC IP similar to that used on the DA8xx SoC family.
+ - "ti,am3352-rtc" - for the RTC IP similar to that used on the AM335x SoC family.
+ This RTC IP has a special WAKE-EN register to enable
+ wakeup generation for the alarm event.
- reg: Address range of rtc register set
- interrupts: rtc timer, alarm interrupts in order
- interrupt-parent: phandle for the interrupt controller
diff --git a/Documentation/devicetree/bindings/rtc/rtc-palmas.txt b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
new file mode 100644
index 00000000000..adbccc0a51e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-palmas.txt
@@ -0,0 +1,33 @@
+Palmas RTC controller bindings
+
+Required properties:
+- compatible:
+ - "ti,palmas-rtc" for the RTC controller on Palmas series devices
+- interrupt-parent: Parent interrupt device, must be handle of palmas node.
+- interrupts: Interrupt number of RTC submodule on device.
+
+Optional properties:
+
+- ti,backup-battery-chargeable: Palmas series devices like the TPS65913 or
+ TPS80036 support a backup battery for powering the RTC when the main
+ battery is removed or in a very low power state. The backup battery
+ can be chargeable or non-chargeable. This flag tells whether the
+ battery is chargeable or not. If it is chargeable, the driver can
+ enable charging.
+- ti,backup-battery-charge-high-current: Enable high-current charging of
+ the backup battery. The device supports charging at < 100mA and
+ > 100mA; high current means > 100mA. If this property is absent, the
+ battery is charged at the lower current, i.e. < 100mA.
+
+Example:
+ palmas: tps65913@58 {
+ ...
+ palmas_rtc: rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&palmas>;
+ interrupts = <8 0>;
+ ti,backup-battery-chargeable;
+ ti,backup-battery-charge-high-current;
+ };
+ ...
+ };
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index e31a2a9d2b0..505e71172ae 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -407,6 +407,18 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
interesting ways depending upong the exporter (if userspace starts depending
upon this implicit synchronization).
+Other Interfaces Exposed to Userspace on the dma-buf FD
+------------------------------------------------------
+
+- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only
+ with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow
+ the usual size discovery pattern size = SEEK_END(0); SEEK_SET(0). Every
+ other llseek operation will report -EINVAL.
+
+ If llseek on dma-buf FDs isn't supported the kernel will report -ESPIPE for
+ all cases. Userspace can use this to detect support for discovering the
+ dma-buf size using llseek.
+
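+ A minimal sketch of the size discovery pattern from userspace (fd is
+ assumed to be a dma-buf file descriptor obtained from some exporter):
+
+   #include <unistd.h>
+   #include <sys/types.h>
+
+   static off_t dmabuf_size(int fd)
+   {
+           off_t size = lseek(fd, 0, SEEK_END);
+
+           if (size < 0)        /* errno == ESPIPE: llseek not supported */
+                   return -1;
+           lseek(fd, 0, SEEK_SET);        /* rewind, completing the pattern */
+           return size;
+   }
+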
Miscellaneous notes
-------------------
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index fb57d85e731..fcb34a5697e 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -299,3 +299,6 @@ PWM
PHY
devm_usb_get_phy()
devm_usb_put_phy()
+
+SLAVE DMA ENGINE
+ devm_acpi_dma_controller_register()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fcc22c982a2..823c95faebd 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -854,16 +854,15 @@ Committed_AS: The amount of memory presently allocated on the system.
The committed memory is a sum of all of the memory which
has been allocated by processes, even if it has not been
"used" by them as of yet. A process which malloc()'s 1G
- of memory, but only touches 300M of it will only show up
- as using 300M of memory even if it has the address space
- allocated for the entire 1G. This 1G is memory which has
- been "committed" to by the VM and can be used at any time
- by the allocating application. With strict overcommit
- enabled on the system (mode 2 in 'vm.overcommit_memory'),
- allocations which would exceed the CommitLimit (detailed
- above) will not be permitted. This is useful if one needs
- to guarantee that processes will not fail due to lack of
- memory once that memory has been successfully allocated.
+ of memory, but only touches 300M of it will show up as
+ using 1G. This 1G is memory which has been "committed" to
+ by the VM and can be used at any time by the allocating
+ application. With strict overcommit enabled on the system
+ (mode 2 in 'vm.overcommit_memory'), allocations which would
+ exceed the CommitLimit (detailed above) will not be permitted.
+ This is useful if one needs to guarantee that processes will
+ not fail due to lack of memory once that memory has been
+ successfully allocated.
VmallocTotal: total size of vmalloc memory area
VmallocUsed: amount of vmalloc area which is used
VmallocChunk: largest contiguous block of vmalloc area which is free
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.txt b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
index 59b4a0962e0..b176928e696 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.txt
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
@@ -79,6 +79,10 @@ to just make sure certain lists can't become empty.
Most systems just mount another filesystem over rootfs and ignore it. The
amount of space an empty instance of ramfs takes up is tiny.
+If CONFIG_TMPFS is enabled, rootfs will use tmpfs instead of ramfs by
+default. To force ramfs, add "rootfstype=ramfs" to the kernel command
+line.
+
What is initramfs?
------------------
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c858f8419eb..c420676c6fe 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -147,6 +147,7 @@ applicable everywhere (see syntax).
- "modules"
This declares the symbol to be used as the MODULES symbol, which
enables the third modular state for all config symbols.
+ At most one symbol may have the "modules" option set.
- "env"=<value>
This imports the environment variable into Kconfig. It behaves like
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index e349f293cc9..8ef6dbb6a46 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -175,11 +175,9 @@ Searching in menuconfig:
/^hotplug
When searching, symbols are sorted thus:
- - exact match first: an exact match is when the search matches
- the complete symbol name;
- - alphabetical order: when two symbols do not match exactly,
- they are sorted in alphabetical order (in the user's current
- locale).
+ - first, exact matches, sorted alphabetically (an exact match
+ is when the search matches the complete symbol name);
+ - then, other matches, sorted alphabetically.
For example: ^ATH.K matches:
ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG
[...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...]
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 18b64b2b8a6..f11580f8719 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -86,6 +86,8 @@ generic_netlink.txt
- info on Generic Netlink
gianfar.txt
- Gianfar Ethernet Driver.
+i40e.txt
+ - README for the Intel Ethernet Controller XL710 Driver (i40e).
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
igb.txt
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
new file mode 100644
index 00000000000..f737273c6dc
--- /dev/null
+++ b/Documentation/networking/i40e.txt
@@ -0,0 +1,115 @@
+Linux Base Driver for the Intel(R) Ethernet Controller XL710 Family
+===================================================================
+
+Intel i40e Linux driver.
+Copyright(c) 2013 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Additional Configurations
+- Performance Tuning
+- Known Issues
+- Support
+
+
+Identifying Your Adapter
+========================
+
+The driver in this release is compatible with the Intel Ethernet
+Controller XL710 Family.
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ http://support.intel.com/support/network/sb/CS-012904.htm
+
+
+Enabling the driver
+===================
+
+The driver is enabled via the standard kernel configuration system,
+using the make command:
+
+ make oldconfig/silentoldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+ -> Device Drivers
+ -> Network device support (NETDEVICES [=y])
+ -> Ethernet driver support
+ -> Intel devices
+ -> Intel(R) Ethernet Controller XL710 Family
+
+Additional Configurations
+=========================
+
+ Generic Receive Offload (GRO)
+ -----------------------------
+ The driver supports the in-kernel software implementation of GRO. GRO has
+ shown that by coalescing Rx traffic into larger chunks of data, CPU
+ utilization can be significantly reduced when under large Rx load. GRO is
+ an evolution of the previously-used LRO interface. GRO is able to coalesce
+ other protocols besides TCP. It's also safe to use with configurations that
+ are problematic for LRO, namely bridging and iSCSI.
+
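+  As a hedged illustration (not from the driver itself), GRO can be
+  toggled per interface with "ethtool -K <iface> gro on|off", or
+  programmatically via the ETHTOOL ioctls; "ifname" below is whatever
+  interface name applies:
+
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <sys/socket.h>
+    #include <net/if.h>
+    #include <linux/ethtool.h>
+    #include <linux/sockios.h>
+
+    static int enable_gro(const char *ifname)
+    {
+            struct ethtool_value ev = { .cmd = ETHTOOL_SGRO, .data = 1 };
+            struct ifreq ifr;
+            int fd = socket(AF_INET, SOCK_DGRAM, 0), ret;
+
+            memset(&ifr, 0, sizeof(ifr));
+            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+            ifr.ifr_data = (char *)&ev;
+            ret = ioctl(fd, SIOCETHTOOL, &ifr);        /* 0 on success */
+            close(fd);
+            return ret;
+    }
+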
+ Ethtool
+ -------
+ The driver utilizes the ethtool interface for driver configuration and
+ diagnostics, as well as displaying statistical information. The latest
+ ethtool version is required for this functionality.
+
+ The latest release of ethtool can be found from
+ https://www.kernel.org/pub/software/network/ethtool
+
+ Data Center Bridging (DCB)
+ --------------------------
+ DCB configuration is not currently supported.
+
+ FCoE
+ ----
+ Fibre Channel over Ethernet (FCoE) hardware offload is not currently
+ supported.
+
+ MAC and VLAN anti-spoofing feature
+ ----------------------------------
+ When a malicious driver attempts to send a spoofed packet, it is dropped by
+ the hardware and not transmitted. An interrupt is sent to the PF driver
+ notifying it of the spoof attempt.
+
+ When a spoofed packet is detected the PF driver will send the following
+ message to the system log (displayed by the "dmesg" command):
+
+ Spoof event(s) detected on VF (n)
+
+ where n is the VF that attempted the spoofing.
+
+
+Performance Tuning
+==================
+
+An excellent article on performance tuning can be found at:
+
+http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
+
+
+Known Issues
+============
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+ http://e1000.sourceforge.net
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sourceforge.net and copy
+netdev@vger.kernel.org.
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index ab7d16efa96..9d4c1d18ad4 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -182,6 +182,7 @@ core_pattern is used to specify a core dumpfile pattern name.
%<NUL> '%' is dropped
%% output one '%'
%p pid
+ %P global pid (init PID namespace)
%u uid
%g gid
%d dump mode, matches PR_SET_DUMPABLE and
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 36ecc26c743..79a797eb3e8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -200,17 +200,25 @@ fragmentation index is <= extfrag_threshold. The default value is 500.
hugepages_treat_as_movable
-This parameter is only useful when kernelcore= is specified at boot time to
-create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
-are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
-value written to hugepages_treat_as_movable allows huge pages to be allocated
-from ZONE_MOVABLE.
-
-Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
-pages pool can easily grow or shrink within. Assuming that applications are
-not running that mlock() a lot of memory, it is likely the huge pages pool
-can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
-into nr_hugepages and triggering page reclaim.
+This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
+or not. If set to non-zero, hugepages can be allocated from ZONE_MOVABLE.
+ZONE_MOVABLE is created when kernel boot parameter kernelcore= is specified,
+so this parameter has no effect if used without kernelcore=.
+
+Hugepage migration is now available in some situations which depend on the
+architecture and/or the hugepage size. If a hugepage supports migration,
+allocation from ZONE_MOVABLE is always enabled for the hugepage regardless
+of the value of this parameter.
+IOW, this parameter affects only non-migratable hugepages.
+
+Assuming that hugepages are not migratable on your system, one use case
+for this parameter is making the hugepage pool more extensible by
+enabling allocation from ZONE_MOVABLE: page reclaim, migration, and
+compaction work harder there, so contiguous memory is more likely to be
+found. Note that using ZONE_MOVABLE for non-migratable hugepages can
+harm other features like memory hot-remove (which expects that memory
+blocks on ZONE_MOVABLE are always removable), so it is a trade-off for
+which the user is responsible.
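+
+For example (a sketch; the values are illustrative and the writes need
+root), enabling allocation from ZONE_MOVABLE and then growing the pool:
+
+  #include <stdio.h>
+
+  static void write_proc(const char *path, const char *val)
+  {
+          FILE *f = fopen(path, "w");
+
+          if (f) {
+                  fputs(val, f);
+                  fclose(f);
+          }
+  }
+
+  int main(void)
+  {
+          write_proc("/proc/sys/vm/hugepages_treat_as_movable", "1");
+          write_proc("/proc/sys/vm/nr_hugepages", "1024");  /* grow pool */
+          return 0;
+  }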
==============================================================
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 4ac359b7aa1..bdd4bb97fff 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -165,6 +165,7 @@ which function as described above for the default huge page-sized case.
Interaction of Task Memory Policy with Huge Page Allocation/Freeing
+===================================================================
Whether huge pages are allocated and freed via the /proc interface or
the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
@@ -229,6 +230,7 @@ resulting effect on persistent huge page allocation is as follows:
of huge pages over all on-lines nodes with memory.
Per Node Hugepages Attributes
+=============================
A subset of the contents of the root huge page control directory in sysfs,
described above, will be replicated under each the system device of each
@@ -258,6 +260,7 @@ applied, from which node the huge page allocation will be attempted.
Using Huge Pages
+================
If the user applications are going to request huge pages using mmap system
call, then it is required that system administrator mount a file system of
@@ -296,20 +299,16 @@ calls, though the mount of filesystem will be required for using mmap calls
without MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see
map_hugetlb.c.
-*******************************************************************
+Examples
+========
-/*
- * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
- */
+1) map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
-*******************************************************************
+2) hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c
-/*
- * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c
- */
+3) hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c
-*******************************************************************
-
-/*
- * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c
- */
+4) The libhugetlbfs (http://libhugetlbfs.sourceforge.net) library provides a
+ wide range of userspace tools to help with huge page usability, environment
+ setup, and control. Furthermore it provides useful test cases that should be
+ used when modifying code to ensure no regressions are introduced.
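+
+5) A minimal MAP_HUGETLB sketch in the spirit of map_hugetlb.c above,
+   assuming a 2MB default huge page size and a non-empty pool:
+
+   #define _GNU_SOURCE
+   #include <stdio.h>
+   #include <sys/mman.h>
+
+   #ifndef MAP_HUGETLB
+   #define MAP_HUGETLB 0x40000        /* arch-specific; x86 value */
+   #endif
+
+   #define LENGTH (2UL * 1024 * 1024)
+
+   int main(void)
+   {
+           void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
+                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+                             -1, 0);
+
+           if (addr == MAP_FAILED) {
+                   perror("mmap");
+                   return 1;
+           }
+           ((char *)addr)[0] = 1;   /* touch: faults in one huge page */
+           munmap(addr, LENGTH);
+           return 0;
+   }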
diff --git a/Documentation/vm/soft-dirty.txt b/Documentation/vm/soft-dirty.txt
index 9a12a5956bc..55684d11a1e 100644
--- a/Documentation/vm/soft-dirty.txt
+++ b/Documentation/vm/soft-dirty.txt
@@ -28,6 +28,13 @@ This is so, since the pages are still mapped to physical memory, and thus all
the kernel does is finds this fact out and puts both writable and soft-dirty
bits on the PTE.
+ While tracking memory changes via page faults is sufficient in most cases,
+there is still a scenario in which soft-dirty bits can be lost -- a task
+unmaps a previously mapped memory region and then maps a new one at exactly
+the same place. When unmap is called, the kernel internally clears the PTE
+values, including the soft-dirty bits. To notify the user space application
+about such memory region renewal, the kernel always marks new memory
+regions (and expanded regions) as soft dirty.
This feature is actively used by the checkpoint-restore project. You
can find more details about it on http://criu.org
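
The renewal behaviour described above can be observed from user space
through the interfaces this document specifies: bit 55 of each 64-bit
/proc/PID/pagemap entry reports the soft-dirty state. A minimal sketch,
assuming the page of interest is already mapped:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Read the soft-dirty flag (pagemap bit 55) for the page at addr. */
	static int soft_dirty(int pagemap_fd, unsigned long addr, long psize)
	{
		uint64_t entry;
		off_t off = (addr / psize) * sizeof(entry);

		if (pread(pagemap_fd, &entry, sizeof(entry), off) !=
		    sizeof(entry))
			return -1;
		return (entry >> 55) & 1;
	}

	int main(void)
	{
		static char buf[4096];
		long psize = sysconf(_SC_PAGESIZE);
		int fd = open("/proc/self/pagemap", O_RDONLY);

		if (fd < 0)
			return 1;
		buf[0] = 1;	/* write the page so it is mapped and dirtied */
		printf("soft-dirty: %d\n",
		       soft_dirty(fd, (unsigned long)buf, psize));
		close(fd);
		return 0;
	}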
diff --git a/MAINTAINERS b/MAINTAINERS
index 87efa1f5c7f..e61c2e83fc2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1028,7 +1028,7 @@ F: arch/arm/mach-orion5x/ts78xx-*
ARM/MICREL KS8695 ARCHITECTURE
M: Greg Ungerer <gerg@uclinux.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-F: arch/arm/mach-ks8695
+F: arch/arm/mach-ks8695/
S: Odd Fixes
ARM/MIOA701 MACHINE SUPPORT
@@ -1048,7 +1048,6 @@ M: STEricsson <STEricsson_nomadik_linux@list.st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-nomadik/
-F: arch/arm/plat-nomadik/
F: drivers/i2c/busses/i2c-nomadik.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
@@ -1070,7 +1069,7 @@ F: drivers/mmc/host/msm_sdcc.h
F: drivers/tty/serial/msm_serial.h
F: drivers/tty/serial/msm_serial.c
F: drivers/*/pm8???-*
-F: drivers/ssbi/
+F: drivers/mfd/ssbi/
F: include/linux/mfd/pm8xxx/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
S: Maintained
@@ -1156,7 +1155,6 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
-F: arch/arm/plat-s3c24xx/
F: arch/arm/mach-s3c24*/
F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
@@ -1179,8 +1177,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5pv210/mach-aquila.c
F: arch/arm/mach-s5pv210/mach-goni.c
-F: arch/arm/mach-exynos/mach-universal_c210.c
-F: arch/arm/mach-exynos/mach-nuri.c
ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
@@ -1325,7 +1321,7 @@ F: drivers/mmc/host/wmt-sdmmc.c
F: drivers/pwm/pwm-vt8500.c
F: drivers/rtc/rtc-vt8500.c
F: drivers/tty/serial/vt8500_serial.c
-F: drivers/usb/host/ehci-vt8500.c
+F: drivers/usb/host/ehci-platform.c
F: drivers/usb/host/uhci-platform.c
F: drivers/video/vt8500lcdfb.*
F: drivers/video/wm8505fb*
@@ -1815,6 +1811,17 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
+BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+M: Christian Daudt <csd@broadcom.com>
+T: git git://git.github.com/broadcom/bcm11351
+S: Maintained
+F: arch/arm/mach-bcm/
+F: arch/arm/boot/dts/bcm113*
+F: arch/arm/boot/dts/bcm281*
+F: arch/arm/configs/bcm_defconfig
+F: drivers/mmc/host/sdhci_bcm_kona.c
+F: drivers/clocksource/bcm_kona_timer.c
+
BROADCOM BCM2835 ARM ARCHITECTURE
M: Stephen Warren <swarren@wwwdotorg.org>
L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2035,10 +2042,10 @@ W: http://ceph.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
S: Supported
F: Documentation/filesystems/ceph.txt
-F: fs/ceph
-F: net/ceph
-F: include/linux/ceph
-F: include/linux/crush
+F: fs/ceph/
+F: net/ceph/
+F: include/linux/ceph/
+F: include/linux/crush/
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
L: linux-usb@vger.kernel.org
@@ -2335,7 +2342,7 @@ CPU POWER MONITORING SUBSYSTEM
M: Dominik Brodowski <linux@dominikbrodowski.net>
M: Thomas Renninger <trenn@suse.de>
S: Maintained
-F: tools/power/cpupower
+F: tools/power/cpupower/
CPUSETS
M: Li Zefan <lizefan@huawei.com>
@@ -2773,7 +2780,7 @@ L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
-F: drivers/gpu/drm/i915
+F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915*
@@ -2785,7 +2792,7 @@ M: Kyungmin Park <kyungmin.park@samsung.com>
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
-F: drivers/gpu/drm/exynos
+F: drivers/gpu/drm/exynos/
F: include/drm/exynos*
F: include/uapi/drm/exynos*
@@ -3038,7 +3045,7 @@ M: Mauro Carvalho Chehab <m.chehab@samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
-F: drivers/edac/ghes-edac.c
+F: drivers/edac/ghes_edac.c
EDAC-I82443BXGX
M: Tim Small <tim@buttersideup.com>
@@ -3644,8 +3651,8 @@ M: Arnd Bergmann <arnd@arndb.de>
L: linux-arch@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git
S: Maintained
-F: include/asm-generic
-F: include/uapi/asm-generic
+F: include/asm-generic/
+F: include/uapi/asm-generic/
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>
@@ -3687,7 +3694,8 @@ GRE DEMULTIPLEXER DRIVER
M: Dmitry Kozlov <xeb@mail.ru>
L: netdev@vger.kernel.org
S: Maintained
-F: net/ipv4/gre.c
+F: net/ipv4/gre_demux.c
+F: net/ipv4/gre_offload.c
F: include/net/gre.h
GRETH 10/100/1G Ethernet MAC device driver
@@ -3765,7 +3773,7 @@ L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
S: Odd Fixes
-F: drivers/media/usb/hdpvr
+F: drivers/media/usb/hdpvr/
HWPOISON MEMORY FAILURE HANDLING
M: Andi Kleen <andi@firstfloor.org>
@@ -4355,7 +4363,7 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e)
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
@@ -4380,6 +4388,7 @@ F: Documentation/networking/igbvf.txt
F: Documentation/networking/ixgb.txt
F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
+F: Documentation/networking/i40e.txt
F: drivers/net/ethernet/intel/
INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
@@ -4573,7 +4582,7 @@ S: Supported
W: http://www.openfabrics.org
W: www.open-iscsi.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-F: drivers/infiniband/ulp/iser
+F: drivers/infiniband/ulp/iser/
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
@@ -4627,7 +4636,7 @@ W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
-F: drivers/media/tuners/it913x*
+F: drivers/media/tuners/tuner_it913x*
IVTV VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
@@ -5963,15 +5972,12 @@ S: Maintained
F: arch/arm/*omap*/*pm*
F: drivers/cpufreq/omap-cpufreq.c
-OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
+OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
M: Rajendra Nayak <rnayak@ti.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
-F: arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
-F: arch/arm/mach-omap2/powerdomain44xx.c
-F: arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
-F: arch/arm/mach-omap2/clockdomain44xx.c
+F: arch/arm/mach-omap2/prm*
OMAP AUDIO SUPPORT
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -6137,7 +6143,7 @@ W: http://openrisc.net
L: linux@lists.openrisc.net (moderated for non-subscribers)
S: Maintained
T: git git://openrisc.net/~jonas/linux
-F: arch/openrisc
+F: arch/openrisc/
OPENVSWITCH
M: Jesse Gross <jesse@nicira.com>
@@ -6428,7 +6434,7 @@ M: Jamie Iles <jamie@jamieiles.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/jamieiles/linux-2.6-ji.git
S: Supported
-F: arch/arm/mach-picoxcell
+F: arch/arm/mach-picoxcell/
F: drivers/*/picoxcell*
F: drivers/*/*/picoxcell*
@@ -6701,7 +6707,7 @@ F: drivers/spi/spi-pxa2xx*
F: drivers/usb/gadget/pxa2*
F: include/sound/pxa2xx-lib.h
F: sound/arm/pxa*
-F: sound/soc/pxa
+F: sound/soc/pxa/
MMP SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
@@ -7154,7 +7160,7 @@ SAMSUNG AUDIO (ASoC) DRIVERS
M: Sangbeom Kim <sbkim73@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
-F: sound/soc/samsung
+F: sound/soc/samsung/
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jg1.han@samsung.com>
@@ -7200,10 +7206,11 @@ SERIAL DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-serial@vger.kernel.org
S: Maintained
-F: drivers/tty/serial
+F: drivers/tty/serial/
SYNOPSYS DESIGNWARE DMAC DRIVER
M: Viresh Kumar <viresh.linux@gmail.com>
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: include/linux/dw_dmac.h
F: drivers/dma/dw/
@@ -7234,7 +7241,7 @@ TLG2300 VIDEO4LINUX-2 DRIVER
M: Huang Shijie <shijie8@gmail.com>
M: Hans Verkuil <hverkuil@xs4all.nl>
S: Odd Fixes
-F: drivers/media/usb/tlg2300
+F: drivers/media/usb/tlg2300/
SC1200 WDT DRIVER
M: Zwane Mwaikambo <zwane@arm.linux.org.uk>
@@ -7495,7 +7502,7 @@ L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
S: Odd Fixes
-F: drivers/media/radio/radio-si4713.h
+F: drivers/media/radio/radio-si4713.c
SIANO DVB DRIVER
M: Mauro Carvalho Chehab <m.chehab@samsung.com>
@@ -7504,9 +7511,9 @@ W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
S: Odd fixes
F: drivers/media/common/siano/
-F: drivers/media/dvb/siano/
F: drivers/media/usb/siano/
-F: drivers/media/mmc/siano
+F: drivers/media/usb/siano/
+F: drivers/media/mmc/siano/
SH_VEU V4L2 MEM2MEM DRIVER
M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
@@ -7544,9 +7551,9 @@ P: Vincent Sanders <vince@simtec.co.uk>
M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB2410ITX/
S: Supported
-F: arch/arm/mach-s3c2410/mach-bast.c
-F: arch/arm/mach-s3c2410/bast-ide.c
-F: arch/arm/mach-s3c2410/bast-irq.c
+F: arch/arm/mach-s3c24xx/mach-bast.c
+F: arch/arm/mach-s3c24xx/bast-ide.c
+F: arch/arm/mach-s3c24xx/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
@@ -7555,7 +7562,7 @@ L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers
T: git git://gitorious.org/linux-davinci/linux-davinci.git
Q: http://patchwork.kernel.org/project/linux-davinci/list/
S: Supported
-F: arch/arm/mach-davinci
+F: arch/arm/mach-davinci/
F: drivers/i2c/busses/i2c-davinci.c
TI DAVINCI SERIES MEDIA DRIVER
@@ -7623,6 +7630,14 @@ S: Maintained
F: Documentation/security/Smack.txt
F: security/smack/
+SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
+M: Kevin Hilman <khilman@kernel.org>
+M: Nishanth Menon <nm@ti.com>
+S: Maintained
+F: drivers/power/avs/smartreflex.c
+F: include/linux/power/smartreflex.h
+L: linux-pm@vger.kernel.org
+
SMC91x ETHERNET DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -7632,7 +7647,7 @@ SMIA AND SMIA++ IMAGE SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-media@vger.kernel.org
S: Maintained
-F: drivers/media/i2c/smiapp
+F: drivers/media/i2c/smiapp/
F: include/media/smiapp.h
F: drivers/media/i2c/smiapp-pll.c
F: drivers/media/i2c/smiapp-pll.h
@@ -7735,6 +7750,11 @@ W: http://tifmxx.berlios.de/
S: Maintained
F: drivers/memstick/host/tifm_ms.c
+SONY MEMORYSTICK STANDARD SUPPORT
+M: Maxim Levitsky <maximlevitsky@gmail.com>
+S: Maintained
+F: drivers/memstick/core/ms_block.*
+
SOUND
M: Jaroslav Kysela <perex@perex.cz>
M: Takashi Iwai <tiwai@suse.de>
@@ -7811,35 +7831,7 @@ L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: arch/arm/plat-spear/
-
-SPEAR13XX MACHINE SUPPORT
-M: Viresh Kumar <viresh.linux@gmail.com>
-M: Shiraz Hashim <shiraz.hashim@st.com>
-L: spear-devel@list.st.com
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.st.com/spear
-S: Maintained
-F: arch/arm/mach-spear13xx/
-
-SPEAR3XX MACHINE SUPPORT
-M: Viresh Kumar <viresh.linux@gmail.com>
-M: Shiraz Hashim <shiraz.hashim@st.com>
-L: spear-devel@list.st.com
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.st.com/spear
-S: Maintained
-F: arch/arm/mach-spear3xx/
-
-SPEAR6XX MACHINE SUPPORT
-M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
-M: Shiraz Hashim <shiraz.hashim@st.com>
-M: Viresh Kumar <viresh.linux@gmail.com>
-L: spear-devel@list.st.com
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.st.com/spear
-S: Maintained
-F: arch/arm/mach-spear6xx/
+F: arch/arm/mach-spear/
SPEAR CLOCK FRAMEWORK SUPPORT
M: Viresh Kumar <viresh.linux@gmail.com>
@@ -8108,7 +8100,7 @@ M: Vineet Gupta <vgupta@synopsys.com>
S: Supported
F: arch/arc/
F: Documentation/devicetree/bindings/arc/
-F: drivers/tty/serial/arc-uart.c
+F: drivers/tty/serial/arc_uart.c
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
@@ -8798,7 +8790,6 @@ L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/phy/
-F: drivers/usb/otg/
USB PRINTER DRIVER (usblp)
M: Pete Zaitcev <zaitcev@redhat.com>
@@ -9329,7 +9320,7 @@ M: Matthew Garrett <matthew.garrett@nebula.com>
L: platform-driver-x86@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
S: Maintained
-F: drivers/platform/x86
+F: drivers/platform/x86/
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 40736da9bea..ffb19b7da99 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -338,6 +338,11 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
unsigned long doff = 7 & (unsigned long) dst;
if (len) {
+ if (!access_ok(VERIFY_READ, src, len)) {
+ *errp = -EFAULT;
+ memset(dst, 0, len);
+ return sum;
+ }
if (!doff) {
if (!soff)
checksum = csum_partial_cfu_aligned(
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index bdee3a81205..2340af0e1d6 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -18,12 +18,6 @@
#include <asm/clk.h>
#include <asm/mach_desc.h>
-/* called from unflatten_device_tree() to bootstrap devicetree itself */
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
/**
 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a08ce718542..81279ec73a6 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -127,9 +127,8 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n", __func__, start, end);
+ pr_err("%s(%llx, %llx)\n", __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 39ad030ac0c..117f955a2a0 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1235,6 +1235,23 @@ void edma_resume(unsigned channel)
}
EXPORT_SYMBOL(edma_resume);
+int edma_trigger_channel(unsigned channel)
+{
+ unsigned ctlr;
+ unsigned int mask;
+
+ ctlr = EDMA_CTLR(channel);
+ channel = EDMA_CHAN_SLOT(channel);
+ mask = BIT(channel & 0x1f);
+
+ edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);
+
+ pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
+ edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
+ return 0;
+}
+EXPORT_SYMBOL(edma_trigger_channel);
+
/**
* edma_start - start dma on a channel
* @channel: channel being activated
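
The newly exported edma_trigger_channel() above gives client drivers a
software trigger for channels whose parameter RAM is already programmed. A
hedged usage sketch (the header path and the demo function are assumptions;
real clients obtain the channel via edma_alloc_channel() first):

	#include <linux/platform_data/edma.h>

	/*
	 * Hypothetical client: manually kick a transfer on a channel whose
	 * PaRAM slot has already been configured.
	 */
	static void demo_sw_trigger(unsigned int chan)
	{
		edma_trigger_channel(chan);	/* sets the channel's ESR bit */
	}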
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 64f2e50e19c..6bc1c181581 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -224,62 +224,15 @@ static struct ep93xx_spi_chip_ops vision_spi_flash_hw = {
#define VISION_SPI_MMC_WP EP93XX_GPIO_LINE_F(0)
#define VISION_SPI_MMC_CD EP93XX_GPIO_LINE_EGPIO15
-static struct gpio vision_spi_mmc_gpios[] = {
- { VISION_SPI_MMC_WP, GPIOF_DIR_IN, "mmc_spi:wp" },
- { VISION_SPI_MMC_CD, GPIOF_DIR_IN, "mmc_spi:cd" },
-};
-
-static int vision_spi_mmc_init(struct device *pdev,
- irqreturn_t (*func)(int, void *), void *pdata)
-{
- int err;
-
- err = gpio_request_array(vision_spi_mmc_gpios,
- ARRAY_SIZE(vision_spi_mmc_gpios));
- if (err)
- return err;
-
- err = gpio_set_debounce(VISION_SPI_MMC_CD, 1);
- if (err)
- goto exit_err;
-
- err = request_irq(gpio_to_irq(VISION_SPI_MMC_CD), func,
- IRQ_TYPE_EDGE_BOTH, "mmc_spi:cd", pdata);
- if (err)
- goto exit_err;
-
- return 0;
-
-exit_err:
- gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
- return err;
-
-}
-
-static void vision_spi_mmc_exit(struct device *pdev, void *pdata)
-{
- free_irq(gpio_to_irq(VISION_SPI_MMC_CD), pdata);
- gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
-}
-
-static int vision_spi_mmc_get_ro(struct device *pdev)
-{
- return !!gpio_get_value(VISION_SPI_MMC_WP);
-}
-
-static int vision_spi_mmc_get_cd(struct device *pdev)
-{
- return !gpio_get_value(VISION_SPI_MMC_CD);
-}
-
static struct mmc_spi_platform_data vision_spi_mmc_data = {
- .init = vision_spi_mmc_init,
- .exit = vision_spi_mmc_exit,
- .get_ro = vision_spi_mmc_get_ro,
- .get_cd = vision_spi_mmc_get_cd,
.detect_delay = 100,
.powerup_msecs = 100,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .flags = MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
+ .cd_gpio = VISION_SPI_MMC_CD,
+ .cd_debounce = 1,
+ .ro_gpio = VISION_SPI_MMC_WP,
+ .caps2 = MMC_CAP2_RO_ACTIVE_HIGH,
};
static int vision_spi_mmc_hw_setup(struct spi_device *spi)
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index e065c117f5a..5211f62c624 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -61,25 +61,8 @@ void __init mx25_init_irq(void)
mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
}
-static struct sdma_script_start_addrs imx25_sdma_script __initdata = {
- .ap_2_ap_addr = 729,
- .uart_2_mcu_addr = 904,
- .per_2_app_addr = 1255,
- .mcu_2_app_addr = 834,
- .uartsh_2_mcu_addr = 1120,
- .per_2_shp_addr = 1329,
- .mcu_2_shp_addr = 1048,
- .ata_2_mcu_addr = 1560,
- .mcu_2_ata_addr = 1479,
- .app_2_per_addr = 1189,
- .app_2_mcu_addr = 770,
- .shp_2_per_addr = 1407,
- .shp_2_mcu_addr = 979,
-};
-
static struct sdma_platform_data imx25_sdma_pdata __initdata = {
.fw_name = "sdma-imx25.bin",
- .script_addrs = &imx25_sdma_script,
};
static const struct resource imx25_audmux_res[] __initconst = {
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index a8229b7f10b..eb3cce38c70 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -103,22 +103,8 @@ void __init mx53_init_irq(void)
tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR));
}
-static struct sdma_script_start_addrs imx51_sdma_script __initdata = {
- .ap_2_ap_addr = 642,
- .uart_2_mcu_addr = 817,
- .mcu_2_app_addr = 747,
- .mcu_2_shp_addr = 961,
- .ata_2_mcu_addr = 1473,
- .mcu_2_ata_addr = 1392,
- .app_2_per_addr = 1033,
- .app_2_mcu_addr = 683,
- .shp_2_per_addr = 1251,
- .shp_2_mcu_addr = 892,
-};
-
static struct sdma_platform_data imx51_sdma_pdata __initdata = {
.fw_name = "sdma-imx51.bin",
- .script_addrs = &imx51_sdma_script,
};
static const struct resource imx51_audmux_res[] __initconst = {
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 66781bf3407..54ee6163c18 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -56,3 +56,8 @@ int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
+
+int pmd_huge_support(void)
+{
+ return 1;
+}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 93cbf566a97..febaee7ca57 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -78,7 +78,7 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8a6295c8620..83e4f959ee4 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -21,6 +21,8 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/cpuidle.h>
+#include <linux/cpufreq.h>
#include <linux/mm.h>
@@ -267,18 +269,28 @@ static int __init xen_guest_init(void)
if (!xen_initial_domain())
xenbus_probe(NULL);
+ /*
+	 * Make sure board-specific code does not set up ops for
+	 * cpuidle and cpufreq.
+ */
+ disable_cpuidle();
+ disable_cpufreq();
+
return 0;
}
core_initcall(xen_guest_init);
static int __init xen_pm_init(void)
{
+ if (!xen_domain())
+ return -ENODEV;
+
pm_power_off = xen_power_off;
arm_pm_restart = xen_restart;
return 0;
}
-subsys_initcall(xen_pm_init);
+late_initcall(xen_pm_init);
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bca4c1c2052..12ad8f3d0cf 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -190,11 +190,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
/*
* Limit the memory size that was specified via FDT.
*/
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2fc8258bab2..5e9aec35830 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,6 +54,11 @@ int pud_huge(pud_t pud)
return !(pud_val(pud) & PUD_TABLE_BIT);
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
+
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 67e8d7ce3fe..de2de5db628 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -44,8 +44,7 @@ static unsigned long phys_initrd_size __initdata = 0;
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index bdb56f09d0a..9e15ab9199b 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -33,8 +33,7 @@ void __init early_init_devtree(void *params)
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
@@ -46,8 +45,3 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
c6x_add_memory(base, size);
}
-
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 3201ddb8da6..c699d325987 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -99,9 +99,6 @@ config ETRAX_KMALLOCED_MODULES
help
Enable module allocation with kmalloc instead of vmalloc.
-config OOM_REBOOT
- bool "Enable reboot at out of memory"
-
source "kernel/Kconfig.preempt"
source mm/Kconfig
@@ -175,12 +172,6 @@ config ETRAX_FLASH_BUSWIDTH
help
Width in bytes of the NOR Flash bus (1, 2 or 4). Is usually 2.
-config ETRAX_NANDFLASH_BUSWIDTH
- int "Buswidth of NAND flash in bytes"
- default "1"
- help
- Width in bytes of the NAND flash (1 or 2).
-
config ETRAX_FLASH1_SIZE
int "FLASH1 size (dec, in MB. 0 = Unknown)"
default "0"
@@ -272,38 +263,6 @@ config ETRAX_AXISFLASHMAP
This option enables MTD mapping of flash devices. Needed to use
flash memories. If unsure, say Y.
-config ETRAX_RTC
- bool "Real Time Clock support"
- depends on ETRAX_I2C
- help
- Enables drivers for the Real-Time Clock battery-backed chips on
- some products. The kernel reads the time when booting, and
- the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
- rtc_time struct (see <file:arch/cris/include/asm/rtc.h>) on the
- /dev/rtc device. You can check the time with cat /proc/rtc, but
- normal time reading should be done using libc function time and
- friends.
-
-choice
- prompt "RTC chip"
- depends on ETRAX_RTC
- default ETRAX_DS1302
-
-config ETRAX_DS1302
- depends on ETRAX_ARCH_V10
- bool "DS1302"
- help
- Enables the driver for the DS1302 Real-Time Clock battery-backed
- chip on some products.
-
-config ETRAX_PCF8563
- bool "PCF8563"
- help
- Enables the driver for the PCF8563 Real-Time Clock battery-backed
- chip on some products.
-
-endchoice
-
config ETRAX_SYNCHRONOUS_SERIAL
bool "Synchronous serial-port support"
help
@@ -578,26 +537,6 @@ config ETRAX_SERIAL_PORT3_DMA5_IN
depends on ETRAX_ARCH_V10
bool "DMA 5"
-config ETRAX_SERIAL_PORT3_DMA9_IN
- bool "Ser3 uses DMA9 for input"
- depends on ETRAXFS
- help
- Enables the DMA9 input channel for ser3 (ttyS3).
- If you do not enable DMA, an interrupt for each character will be
- used when receiving data.
- Normally you want to use DMA, unless you use the DMA channel for
- something else.
-
-config ETRAX_SERIAL_PORT3_DMA3_IN
- bool "Ser3 uses DMA3 for input"
- depends on CRIS_MACH_ARTPEC3
- help
- Enables the DMA3 input channel for ser3 (ttyS3).
- If you do not enable DMA, an interrupt for each character will be
- used when receiving data.
- Normally you want to use DMA, unless you use the DMA channel for
- something else.
-
endchoice
choice
@@ -615,26 +554,6 @@ config ETRAX_SERIAL_PORT3_DMA4_OUT
depends on ETRAX_ARCH_V10
bool "DMA 4"
-config ETRAX_SERIAL_PORT3_DMA8_OUT
- bool "Ser3 uses DMA8 for output"
- depends on ETRAXFS
- help
- Enables the DMA8 output channel for ser3 (ttyS3).
- If you do not enable DMA, an interrupt for each character will be
- used when transmitting data.
- Normally you want to use DMA, unless you use the DMA channel for
- something else.
-
-config ETRAX_SERIAL_PORT3_DMA2_OUT
- bool "Ser3 uses DMA2 for output"
- depends on CRIS_MACH_ARTPEC3
- help
- Enables the DMA2 output channel for ser3 (ttyS3).
- If you do not enable DMA, an interrupt for each character will be
- used when transmitting data.
- Normally you want to use DMA, unless you use the DMA channel for
- something else.
-
endchoice
endmenu
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index daf5f19b61a..239dab0b95c 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -417,16 +417,6 @@ config ETRAX_USB_HOST
for CTRL and BULK traffic only, INTR traffic may work as well
however (depending on the requirements of timeliness).
-config ETRAX_USB_HOST_PORT1
- bool "USB port 1 enabled"
- depends on ETRAX_USB_HOST
- default n
-
-config ETRAX_USB_HOST_PORT2
- bool "USB port 2 enabled"
- depends on ETRAX_USB_HOST
- default n
-
config ETRAX_PTABLE_SECTOR
int "Byte-offset of partition table sector"
depends on ETRAX_AXISFLASHMAP
@@ -527,19 +517,6 @@ config ETRAX_GPIO
Remember that you need to setup the port directions appropriately in
the General configuration.
-config ETRAX_PA_BUTTON_BITMASK
- hex "PA-buttons bitmask"
- depends on ETRAX_GPIO
- default "02"
- help
- This is a bitmask with information about what bits on PA that
- are used for buttons.
- Most products has a so called TEST button on PA1, if that's true
- use 02 here.
- Use 00 if there are no buttons on PA.
- If the bitmask is <> 00 a button driver will be included in the gpio
- driver. ETRAX general I/O support must be enabled.
-
config ETRAX_PA_CHANGEABLE_DIR
hex "PA user changeable dir mask"
depends on ETRAX_GPIO
@@ -580,51 +557,4 @@ config ETRAX_PB_CHANGEABLE_BITS
Bit set = changeable.
You probably want 00 here.
-config ETRAX_DS1302_RST_ON_GENERIC_PORT
- bool "DS1302 RST on Generic Port"
- depends on ETRAX_DS1302
- help
- If your product has the RST signal line for the DS1302 RTC on the
- Generic Port then say Y here, otherwise leave it as N in which
- case the RST signal line is assumed to be connected to Port PB
- (just like the SCL and SDA lines).
-
-config ETRAX_DS1302_RSTBIT
- int "DS1302 RST bit number"
- depends on ETRAX_DS1302
- default "2"
- help
- This is the bit number for the RST signal line of the DS1302 RTC on
- the selected port. If you have selected the generic port then it
- should be bit 27, otherwise your best bet is bit 5.
-
-config ETRAX_DS1302_SCLBIT
- int "DS1302 SCL bit number"
- depends on ETRAX_DS1302
- default "1"
- help
- This is the bit number for the SCL signal line of the DS1302 RTC on
- Port PB. This is probably best left at 3.
-
-config ETRAX_DS1302_SDABIT
- int "DS1302 SDA bit number"
- depends on ETRAX_DS1302
- default "0"
- help
- This is the bit number for the SDA signal line of the DS1302 RTC on
- Port PB. This is probably best left at 2.
-
-config ETRAX_DS1302_TRICKLE_CHARGE
- int "DS1302 Trickle charger value"
- depends on ETRAX_DS1302
- default "0"
- help
- This controls the initial value of the trickle charge register.
- 0 = disabled (use this if you are unsure or have a non rechargeable battery)
- Otherwise the following values can be OR:ed together to control the
- charge current:
- 1 = 2kohm, 2 = 4kohm, 3 = 4kohm
- 4 = 1 diode, 8 = 2 diodes
- Allowed values are (increasing current): 0, 11, 10, 9, 7, 6, 5
-
endif
diff --git a/arch/cris/arch-v10/drivers/Makefile b/arch/cris/arch-v10/drivers/Makefile
index 44bf2e88c26..e5c13183b97 100644
--- a/arch/cris/arch-v10/drivers/Makefile
+++ b/arch/cris/arch-v10/drivers/Makefile
@@ -6,7 +6,5 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o
obj-$(CONFIG_ETRAX_I2C) += i2c.o
obj-$(CONFIG_ETRAX_I2C_EEPROM) += eeprom.o
obj-$(CONFIG_ETRAX_GPIO) += gpio.o
-obj-$(CONFIG_ETRAX_DS1302) += ds1302.o
-obj-$(CONFIG_ETRAX_PCF8563) += pcf8563.o
obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 1d866d3ee2f..6792503aaf7 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -19,64 +19,6 @@ config ETRAX_NO_PHY
switch. This option should normally be disabled. If enabled,
speed and duplex will be locked to 100 Mbit and full duplex.
-config ETRAX_ETHERNET_IFACE0
- depends on ETRAX_ETHERNET
- bool "Enable network interface 0"
-
-config ETRAX_ETHERNET_IFACE1
- depends on (ETRAX_ETHERNET && ETRAXFS)
- bool "Enable network interface 1 (uses DMA6 and DMA7)"
-
-config ETRAX_ETHERNET_GBIT
- depends on (ETRAX_ETHERNET && CRIS_MACH_ARTPEC3)
- bool "Enable gigabit Ethernet support"
-
-choice
- prompt "Eth0 led group"
- depends on ETRAX_ETHERNET_IFACE0
- default ETRAX_ETH0_USE_LEDGRP0
-
-config ETRAX_ETH0_USE_LEDGRP0
- bool "Use LED grp 0"
- depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
- help
- Use LED grp 0 for eth0
-
-config ETRAX_ETH0_USE_LEDGRP1
- bool "Use LED grp 1"
- depends on ETRAX_NBR_LED_GRP_TWO
- help
- Use LED grp 1 for eth0
-
-config ETRAX_ETH0_USE_LEDGRPNULL
- bool "Use no LEDs for eth0"
- help
- Use no LEDs for eth0
-endchoice
-
-choice
- prompt "Eth1 led group"
- depends on ETRAX_ETHERNET_IFACE1
- default ETRAX_ETH1_USE_LEDGRP1
-
-config ETRAX_ETH1_USE_LEDGRP0
- bool "Use LED grp 0"
- depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
- help
- Use LED grp 0 for eth1
-
-config ETRAX_ETH1_USE_LEDGRP1
- bool "Use LED grp 1"
- depends on ETRAX_NBR_LED_GRP_TWO
- help
- Use LED grp 1 for eth1
-
-config ETRAX_ETH1_USE_LEDGRPNULL
- bool "Use no LEDs for eth1"
- help
- Use no LEDs for eth1
-endchoice
-
config ETRAXFS_SERIAL
bool "Serial-port support"
depends on ETRAX_ARCH_V32
@@ -108,261 +50,24 @@ config ETRAX_SERIAL_PORT0
if you do not need DMA to something else.
ser0 can use dma4 or dma6 for output and dma5 or dma7 for input.
-choice
- prompt "Ser0 default port type "
- depends on ETRAX_SERIAL_PORT0
- default ETRAX_SERIAL_PORT0_TYPE_232
- help
- Type of serial port.
-
-config ETRAX_SERIAL_PORT0_TYPE_232
- bool "Ser0 is a RS-232 port"
- help
- Configure serial port 0 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485HD
- bool "Ser0 is a half duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 0 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485FD
- bool "Ser0 is a full duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 0 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER0_DTR_BIT
- string "Ser 0 DTR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_RI_BIT
- string "Ser 0 RI bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_DSR_BIT
- string "Ser 0 DSR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_CD_BIT
- string "Ser 0 CD bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT0
-
config ETRAX_SERIAL_PORT1
bool "Serial port 1 enabled"
depends on ETRAXFS_SERIAL
help
Enables the ETRAX FS serial driver for ser1 (ttyS1).
-choice
- prompt "Ser1 default port type"
- depends on ETRAX_SERIAL_PORT1
- default ETRAX_SERIAL_PORT1_TYPE_232
- help
- Type of serial port.
-
-config ETRAX_SERIAL_PORT1_TYPE_232
- bool "Ser1 is a RS-232 port"
- help
- Configure serial port 1 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485HD
- bool "Ser1 is a half duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 1 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485FD
- bool "Ser1 is a full duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 1 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER1_DTR_BIT
- string "Ser 1 DTR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_RI_BIT
- string "Ser 1 RI bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_DSR_BIT
- string "Ser 1 DSR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_CD_BIT
- string "Ser 1 CD bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT1
-
config ETRAX_SERIAL_PORT2
bool "Serial port 2 enabled"
depends on ETRAXFS_SERIAL
help
Enables the ETRAX FS serial driver for ser2 (ttyS2).
-choice
- prompt "Ser2 default port type"
- depends on ETRAX_SERIAL_PORT2
- default ETRAX_SERIAL_PORT2_TYPE_232
- help
- What DMA channel to use for ser2
-
-config ETRAX_SERIAL_PORT2_TYPE_232
- bool "Ser2 is a RS-232 port"
- help
- Configure serial port 2 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485HD
- bool "Ser2 is a half duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 2 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485FD
- bool "Ser2 is a full duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 2 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-
-config ETRAX_SER2_DTR_BIT
- string "Ser 2 DTR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_RI_BIT
- string "Ser 2 RI bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_DSR_BIT
- string "Ser 2 DSR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_CD_BIT
- string "Ser 2 CD bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT2
-
config ETRAX_SERIAL_PORT3
bool "Serial port 3 enabled"
depends on ETRAXFS_SERIAL
help
Enables the ETRAX FS serial driver for ser3 (ttyS3).
-choice
- prompt "Ser3 default port type"
- depends on ETRAX_SERIAL_PORT3
- default ETRAX_SERIAL_PORT3_TYPE_232
- help
- What DMA channel to use for ser3.
-
-config ETRAX_SERIAL_PORT3_TYPE_232
- bool "Ser3 is a RS-232 port"
- help
- Configure serial port 3 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485HD
- bool "Ser3 is a half duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 3 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485FD
- bool "Ser3 is a full duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 3 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER3_DTR_BIT
- string "Ser 3 DTR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_RI_BIT
- string "Ser 3 RI bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_DSR_BIT
- string "Ser 3 DSR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_CD_BIT
- string "Ser 3 CD bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SERIAL_PORT4
- bool "Serial port 4 enabled"
- depends on ETRAXFS_SERIAL && CRIS_MACH_ARTPEC3
- help
- Enables the ETRAX FS serial driver for ser4 (ttyS4).
-
-choice
- prompt "Ser4 default port type"
- depends on ETRAX_SERIAL_PORT4
- default ETRAX_SERIAL_PORT4_TYPE_232
- help
- What DMA channel to use for ser4.
-
-config ETRAX_SERIAL_PORT4_TYPE_232
- bool "Ser4 is a RS-232 port"
- help
- Configure serial port 4 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485HD
- bool "Ser4 is a half duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 4 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485FD
- bool "Ser4 is a full duplex RS-485 port"
- depends on ETRAX_RS485
- help
- Configure serial port 4 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-choice
- prompt "Ser4 DMA in channel "
- depends on ETRAX_SERIAL_PORT4
- default ETRAX_SERIAL_PORT4_NO_DMA_IN
- help
- What DMA channel to use for ser4.
-
-
-config ETRAX_SERIAL_PORT4_NO_DMA_IN
- bool "Ser4 uses no DMA for input"
- help
- Do not use DMA for ser4 input.
-
-config ETRAX_SERIAL_PORT4_DMA9_IN
- bool "Ser4 uses DMA9 for input"
- depends on ETRAX_SERIAL_PORT4
- help
- Enables the DMA9 input channel for ser4 (ttyS4).
- If you do not enable DMA, an interrupt for each character will be
- used when receiving data.
- Normally you want to use DMA, unless you use the DMA channel for
- something else.
-
-endchoice
-
-config ETRAX_SER4_DTR_BIT
- string "Ser 4 DTR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_RI_BIT
- string "Ser 4 RI bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_DSR_BIT
- string "Ser 4 DSR bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_CD_BIT
- string "Ser 4 CD bit (empty = not used)"
- depends on ETRAX_SERIAL_PORT4
-
config ETRAX_SYNCHRONOUS_SERIAL
bool "Synchronous serial-port support"
depends on ETRAX_ARCH_V32
@@ -703,32 +408,6 @@ config ETRAX_SPI_SSER0
want to build it as a module, which will be named spi_crisv32_sser.
(You need to select MMC separately.)
-config ETRAX_SPI_SSER0_DMA
- bool "DMA for SPI on sser0 enabled"
- depends on ETRAX_SPI_SSER0
- depends on !ETRAX_SERIAL_PORT1_DMA4_OUT && !ETRAX_SERIAL_PORT1_DMA5_IN
- default y
- help
- Say Y if using DMA (dma4/dma5) for SPI on synchronous serial port 0.
-
-config ETRAX_SPI_MMC_CD_SSER0_PIN
- string "MMC/SD card detect pin for SPI on sser0"
- depends on ETRAX_SPI_SSER0 && MMC_SPI
- default "pd11"
- help
- The pin to use for SD/MMC card detect. This pin should be pulled up
- and grounded when a card is present. If defined as " " (space), no
- pin is selected. A card must then always be inserted for proper
- action.
-
-config ETRAX_SPI_MMC_WP_SSER0_PIN
- string "MMC/SD card write-protect pin for SPI on sser0"
- depends on ETRAX_SPI_SSER0 && MMC_SPI
- default "pd10"
- help
- The pin to use for the SD/MMC write-protect signal for a memory
- card. If defined as " " (space), the card is considered writable.
-
config ETRAX_SPI_SSER1
tristate "SPI using synchronous serial port 1 (sser1)"
depends on ETRAX_SPI_MMC
@@ -742,32 +421,6 @@ config ETRAX_SPI_SSER1
want to build it as a module, which will be named spi_crisv32_sser.
(You need to select MMC separately.)
-config ETRAX_SPI_SSER1_DMA
- bool "DMA for SPI on sser1 enabled"
- depends on ETRAX_SPI_SSER1 && !ETRAX_ETHERNET_IFACE1
- depends on !ETRAX_SERIAL_PORT0_DMA6_OUT && !ETRAX_SERIAL_PORT0_DMA7_IN
- default y
- help
- Say Y if using DMA (dma6/dma7) for SPI on synchronous serial port 1.
-
-config ETRAX_SPI_MMC_CD_SSER1_PIN
- string "MMC/SD card detect pin for SPI on sser1"
- depends on ETRAX_SPI_SSER1 && MMC_SPI
- default "pd12"
- help
- The pin to use for SD/MMC card detect. This pin should be pulled up
- and grounded when a card is present. If defined as " " (space), no
- pin is selected. A card must then always be inserted for proper
- action.
-
-config ETRAX_SPI_MMC_WP_SSER1_PIN
- string "MMC/SD card write-protect pin for SPI on sser1"
- depends on ETRAX_SPI_SSER1 && MMC_SPI
- default "pd9"
- help
- The pin to use for the SD/MMC write-protect signal for a memory
- card. If defined as " " (space), the card is considered writable.
-
config ETRAX_SPI_GPIO
tristate "Bitbanged SPI using gpio pins"
depends on ETRAX_SPI_MMC
@@ -782,51 +435,4 @@ config ETRAX_SPI_GPIO
Say m to build it as a module, which will be called spi_crisv32_gpio.
(You need to select MMC separately.)
-# The default match that of sser0, only because that's how it was tested.
-config ETRAX_SPI_CS_PIN
- string "SPI chip select pin"
- depends on ETRAX_SPI_GPIO
- default "pc3"
- help
- The pin to use for SPI chip select.
-
-config ETRAX_SPI_CLK_PIN
- string "SPI clock pin"
- depends on ETRAX_SPI_GPIO
- default "pc1"
- help
- The pin to use for the SPI clock.
-
-config ETRAX_SPI_DATAIN_PIN
- string "SPI MISO (data in) pin"
- depends on ETRAX_SPI_GPIO
- default "pc16"
- help
- The pin to use for SPI data in from the device.
-
-config ETRAX_SPI_DATAOUT_PIN
- string "SPI MOSI (data out) pin"
- depends on ETRAX_SPI_GPIO
- default "pc0"
- help
- The pin to use for SPI data out to the device.
-
-config ETRAX_SPI_MMC_CD_GPIO_PIN
- string "MMC/SD card detect pin for SPI using gpio (space for none)"
- depends on ETRAX_SPI_GPIO && MMC_SPI
- default "pd11"
- help
- The pin to use for SD/MMC card detect. This pin should be pulled up
- and grounded when a card is present. If defined as " " (space), no
- pin is selected. A card must then always be inserted for proper
- action.
-
-config ETRAX_SPI_MMC_WP_GPIO_PIN
- string "MMC/SD card write-protect pin for SPI using gpio (space for none)"
- depends on ETRAX_SPI_GPIO && MMC_SPI
- default "pd10"
- help
- The pin to use for the SD/MMC write-protect signal for a memory
- card. If defined as " " (space), the card is considered writable.
-
endif
diff --git a/arch/cris/arch-v32/mach-a3/Kconfig b/arch/cris/arch-v32/mach-a3/Kconfig
index 7796aafc711..87547271a59 100644
--- a/arch/cris/arch-v32/mach-a3/Kconfig
+++ b/arch/cris/arch-v32/mach-a3/Kconfig
@@ -15,10 +15,6 @@ config ETRAX_SERIAL_PORTS
int
default 5
-config ETRAX_DDR
- bool
- default y
-
config ETRAX_DDR2_MRS
hex "DDR2 MRS"
default "0"
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index c0a29b96b92..15b815df29c 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -47,7 +47,6 @@ struct task_struct;
*/
#define task_pt_regs(task) user_regs(task_thread_info(task))
-#define current_regs() task_pt_regs(current)
unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/cris/include/uapi/asm/kvm_para.h b/arch/cris/include/uapi/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/cris/include/uapi/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 76069c18ee4..68232db98ba 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -114,6 +114,11 @@ int pud_huge(pud_t pud)
return 0;
}
+int pmd_huge_support(void)
+{
+ return 0;
+}
+
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 3c52fa6d0f8..042431509b5 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -110,6 +110,11 @@ int pud_huge(pud_t pud)
return 0;
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 28813f16473..123919534b8 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -407,10 +407,9 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n",
+ pr_err("%s(%llx, %llx)\n",
__func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0a2c68f9f9b..0c4453f134c 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -46,11 +46,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
#ifdef CONFIG_EARLY_PRINTK
static char *stdout;
@@ -136,8 +131,7 @@ void __init early_init_devtree(void *params)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 7e954042f25..0fa0b69cdd5 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -58,8 +58,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index a7fee0dfb7a..01fda4419ed 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -85,6 +85,11 @@ int pud_huge(pud_t pud)
return (pud_val(pud) & _PAGE_HUGE) != 0;
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
+
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 222152a3f75..177d61de51c 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -171,10 +171,10 @@ ret_from_intr:
mov (REG_EPSW,fp),d0 # need to deliver signals before
# returning to userspace
and EPSW_nSL,d0
- beq resume_kernel # returning to supervisor mode
+ bne resume_userspace # returning to userspace
#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
+resume_kernel:
LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
@@ -189,6 +189,8 @@ need_resched:
bne restore_all
call preempt_schedule_irq[],0
jmp need_resched
+#else
+ jmp resume_kernel
#endif
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 5869e3fa5dd..a63e76872f8 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -55,11 +55,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
void __init early_init_devtree(void *params)
{
void *alloc;
@@ -96,8 +91,7 @@ void __init early_init_devtree(void *params)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6bfcab97c98..b7634ce41db 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -546,14 +546,8 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
-void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
-{
- return __va(memblock_alloc(size, align));
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7b6391b68fb..12e656ffe60 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1297,7 +1297,8 @@ static void __init prom_query_opal(void)
prom_opal_align = 0x10000;
}
-static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
+static int __init prom_rtas_call(int token, int nargs, int nret,
+ int *outputs, ...)
{
struct rtas_args rtas_args;
va_list list;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 442d8e23f8f..8e59abc237d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -611,6 +611,7 @@ int cpu_to_chip_id(int cpu)
of_node_put(np);
return of_get_ibm_chip_id(np);
}
+EXPORT_SYMBOL(cpu_to_chip_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76d8e7cc780..2dd69bf4af4 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -206,7 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
int trap = TRAP(regs);
int is_exec = trap == 0x400;
int fault;
- int rc = 0;
+ int rc = 0, store_update_sp = 0;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
@@ -280,6 +280,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ /*
+ * We want to do this outside mmap_sem, because reading code around nip
+	 * can result in a fault, which will cause a deadlock when called
+	 * with mmap_sem held.
+ */
+ if (user_mode(regs))
+ store_update_sp = store_updates_sp(regs);
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -345,8 +353,7 @@ retry:
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
- if (address + 2048 < uregs->gpr[1]
- && (!user_mode(regs) || !store_updates_sp(regs)))
+ if (address + 2048 < uregs->gpr[1] && !store_update_sp)
goto bad_area;
}
if (expand_stack(vma, address))
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 834ca8eb38f..d67db4bd672 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -86,6 +86,11 @@ int pgd_huge(pgd_t pgd)
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}
+
+int pmd_huge_support(void)
+{
+ return 1;
+}
#else
int pmd_huge(pmd_t pmd)
{
@@ -101,6 +106,11 @@ int pgd_huge(pgd_t pgd)
{
return 0;
}
+
+int pmd_huge_support(void)
+{
+ return 0;
+}
#endif
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d64feb3ea0b..1f97e2b87a6 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
{
/* This would call on the cpuidle framework, and the back-end pseries
* driver to go to idle states
@@ -362,10 +362,22 @@ static void pSeries_idle(void)
if (cpuidle_idle_call()) {
/* On error, execute default handler
* to go into low thread priority and possibly
- * low power mode.
+		 * low power mode by ceding the processor to the hypervisor.
*/
- HMT_low();
- HMT_very_low();
+
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+		 * an external interrupt occurs (which is handled prior
+		 * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ cede_processor();
+
+ get_lppaca()->idle = 0;
}
}
@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- ppc_md.power_save = pSeries_idle;
- }
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
- else
+ } else {
+ /* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
+ }
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c696ad7d343..3ec272859e1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,7 @@ config S390
def_bool y
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -91,7 +92,6 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
@@ -135,15 +135,15 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select VIRT_TO_BUS
select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
- select OLD_SIGSUSPEND3
select OLD_SIGACTION
+ select OLD_SIGSUSPEND3
select SYSCTL_EXCEPTION_TRACE
select USE_GENERIC_SMP_HELPERS if SMP
select VIRT_CPU_ACCOUNTING
+ select VIRT_TO_BUS
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -526,6 +526,7 @@ config CRASH_DUMP
bool "kernel crash dumps"
depends on 64BIT && SMP
select KEXEC
+ select ZFCPDUMP
help
Generate crash dump after being started by kexec.
Crash dump kernels are loaded in the main kernel with kexec-tools
@@ -536,7 +537,7 @@ config CRASH_DUMP
config ZFCPDUMP
def_bool n
prompt "zfcpdump support"
- select SMP
+ depends on SMP
help
  Select this option if you want to build a zfcpdump enabled kernel.
Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index b74400e3e03..d204c65bf72 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,14 +1,13 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -27,6 +26,7 @@ CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
CONFIG_RD_XZ=y
CONFIG_RD_LZO=y
+CONFIG_RD_LZ4=y
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
@@ -38,11 +38,13 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_DEFAULT_DEADLINE=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -92,40 +94,49 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
CONFIG_TIMER_STATS=y
CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
-CONFIG_KPROBES_SANITY_TEST=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_PAGEALLOC=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBES_SANITY_TEST=y
# CONFIG_STRICT_DEVMEM is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
@@ -137,8 +148,10 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
@@ -165,6 +178,8 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 1eaa3625803..5f8bcc5fe42 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -78,10 +78,14 @@ typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
int register_external_interrupt(u16 code, ext_int_handler_t handler);
int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
-void service_subclass_irq_register(void);
-void service_subclass_irq_unregister(void);
-void measurement_alert_subclass_register(void);
-void measurement_alert_subclass_unregister(void);
+
+enum irq_subclass {
+ IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
+ IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+};
+
+void irq_subclass_register(enum irq_subclass subclass);
+void irq_subclass_unregister(enum irq_subclass subclass);
#define irq_canonicalize(irq) (irq)
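
The new helpers collapse the four special-purpose subclass functions into one refcounted pair keyed by the control register 0 bit number (5 for measurement alert, 9 for service signal). A minimal usage sketch, mirroring the converted callers further down; the handler name is hypothetical, while the 0x1407 interrupt code pairing follows the runtime-instrumentation caller in this series:

/* Sketch: enable measurement-alert externals for one consumer. */
static void my_handler(struct ext_code ext_code,
		       unsigned int param32, unsigned long param64)
{
	/* runs with the 0x1407 external interrupt delivered */
}

static int __init my_setup(void)
{
	int rc;

	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	rc = register_external_interrupt(0x1407, my_handler);
	if (rc)	/* roll back the CR0 subclass bit on failure */
		irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	return rc;
}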
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index dcf6948a875..4176dfe0fba 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,6 +31,8 @@
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
struct pt_regs;
struct kprobe;
@@ -57,7 +59,7 @@ typedef u16 kprobe_opcode_t;
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
- kprobe_opcode_t insn[MAX_INSN_SIZE];
+ kprobe_opcode_t *insn;
};
struct prev_kprobe {
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 06a13613604..7dc7f9c63b6 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -56,5 +56,6 @@ bool sclp_has_linemode(void);
bool sclp_has_vt220(void);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 8b6e4f5288a..1f1b8c70ab9 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -221,25 +221,26 @@ static int groups16_from_user(struct group_info *group_info, u16 __user *groupli
asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
+ const struct cred *cred = current_cred();
int i;
if (gidsetsize < 0)
return -EINVAL;
- get_group_info(current->cred->group_info);
- i = current->cred->group_info->ngroups;
+ get_group_info(cred->group_info);
+ i = cred->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
i = -EINVAL;
goto out;
}
- if (groups16_to_user(grouplist, current->cred->group_info)) {
+ if (groups16_to_user(grouplist, cred->group_info)) {
i = -EFAULT;
goto out;
}
}
out:
- put_group_info(current->cred->group_info);
+ put_group_info(cred->group_info);
return i;
}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index c439ac9ced0..1389b637dae 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -332,9 +332,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+ regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+ regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
(u16 __force __user *)(frame->retcode)))
goto give_sigsegv;
@@ -400,9 +400,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+ regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+ regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
(u16 __force __user *)(frame->retcode));
}
@@ -417,7 +417,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->psw.mask = PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
- regs->psw.addr = (__u64) ka->sa.sa_handler;
+ regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d8f35565717..c84f33d51f7 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -16,6 +16,7 @@
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
+#include <asm/sclp.h>
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -64,22 +65,46 @@ static ssize_t copy_page_real(void *buf, void *src, size_t csize)
}
/*
- * Copy one page from "oldmem"
+ * Pointer to ELF header in new kernel
+ */
+static void *elfcorehdr_newmem;
+
+/*
+ * Copy one page from zfcpdump "oldmem"
+ *
+ * For pages below ZFCPDUMP_HSA_SIZE, memory is copied from the HSA;
+ * otherwise the real memory copy path is used.
+ */
+static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
+ unsigned long src, int userbuf)
+{
+ int rc;
+
+ if (src < ZFCPDUMP_HSA_SIZE) {
+ rc = memcpy_hsa(buf, src, csize, userbuf);
+ } else {
+ if (userbuf)
+ rc = copy_to_user_real((void __force __user *) buf,
+ (void *) src, csize);
+ else
+ rc = memcpy_real(buf, (void *) src, csize);
+ }
+ return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from kdump "oldmem"
*
 * For the kdump reserved memory this function performs a swap operation:
* - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
* - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
*/
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
+ unsigned long src, int userbuf)
+
{
- unsigned long src;
int rc;
- if (!csize)
- return 0;
-
- src = (pfn << PAGE_SHIFT) + offset;
if (src < OLDMEM_SIZE)
src += OLDMEM_BASE;
else if (src > OLDMEM_BASE &&
@@ -90,7 +115,88 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
(void *) src, csize);
else
rc = copy_page_real(buf, (void *) src, csize);
- return (rc == 0) ? csize : rc;
+ return rc ? rc : csize;
+}
+
+/*
+ * Copy one page from "oldmem"
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ unsigned long src;
+
+ if (!csize)
+ return 0;
+ src = (pfn << PAGE_SHIFT) + offset;
+ if (OLDMEM_BASE)
+ return copy_oldmem_page_kdump(buf, csize, src, userbuf);
+ else
+ return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
+}
+
+/*
+ * Remap "oldmem" for kdump
+ *
+ * For the kdump reserved memory this function performs a swap operation:
+ * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
+ */
+static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long size_old;
+ int rc;
+
+ if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
+ size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+ rc = remap_pfn_range(vma, from,
+ pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+ size_old, prot);
+ if (rc || size == size_old)
+ return rc;
+ size -= size_old;
+ from += size_old;
+ pfn += size_old >> PAGE_SHIFT;
+ }
+ return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for zfcpdump
+ *
+ * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
+ * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
+ */
+static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
+ unsigned long from,
+ unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long size_hsa;
+
+ if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
+ size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
+ if (size == size_hsa)
+ return 0;
+ size -= size_hsa;
+ from += size_hsa;
+ pfn += size_hsa >> PAGE_SHIFT;
+ }
+ return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Remap "oldmem" for kdump or zfcpdump
+ */
+int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ if (OLDMEM_BASE)
+ return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
+ else
+ return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
+ prot);
}
/*
@@ -101,11 +207,21 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
unsigned long copied = 0;
int rc;
- if ((unsigned long) src < OLDMEM_SIZE) {
- copied = min(count, OLDMEM_SIZE - (unsigned long) src);
- rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
- if (rc)
- return rc;
+ if (OLDMEM_BASE) {
+ if ((unsigned long) src < OLDMEM_SIZE) {
+ copied = min(count, OLDMEM_SIZE - (unsigned long) src);
+ rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+ if (rc)
+ return rc;
+ }
+ } else {
+ if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
+ copied = min(count,
+ ZFCPDUMP_HSA_SIZE - (unsigned long) src);
+ rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
+ if (rc)
+ return rc;
+ }
}
return memcpy_real(dest + copied, src + copied, count - copied);
}
@@ -368,14 +484,6 @@ static int get_mem_chunk_cnt(void)
}
/*
- * Relocate pointer in order to allow vmcore code access the data
- */
-static inline unsigned long relocate(unsigned long addr)
-{
- return OLDMEM_BASE + addr;
-}
-
-/*
* Initialize ELF loads (new kernel)
*/
static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
@@ -426,7 +534,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
ptr = nt_vmcoreinfo(ptr);
memset(phdr, 0, sizeof(*phdr));
phdr->p_type = PT_NOTE;
- phdr->p_offset = relocate(notes_offset);
+ phdr->p_offset = notes_offset;
phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
phdr->p_memsz = phdr->p_filesz;
return ptr;
@@ -435,7 +543,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
/*
* Create ELF core header (new kernel)
*/
-static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
+int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
Elf64_Phdr *phdr_notes, *phdr_loads;
int mem_chunk_cnt;
@@ -443,6 +551,12 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
u32 alloc_size;
u64 hdr_off;
+ /* If we are not in kdump or zfcpdump mode, return */
+ if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
+ return 0;
+ /* If elfcorehdr= has been passed via cmdline, we use that one */
+ if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
+ return 0;
mem_chunk_cnt = get_mem_chunk_cnt();
alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
@@ -460,27 +574,52 @@ static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init loads */
hdr_off = PTR_DIFF(ptr, hdr);
- loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
- *elfcorebuf_sz = hdr_off;
- *elfcorebuf = (void *) relocate((unsigned long) hdr);
- BUG_ON(*elfcorebuf_sz > alloc_size);
+ loads_init(phdr_loads, hdr_off);
+ *addr = (unsigned long long) hdr;
+ elfcorehdr_newmem = hdr;
+ *size = (unsigned long long) hdr_off;
+ BUG_ON(elfcorehdr_size > alloc_size);
+ return 0;
}
/*
- * Create kdump ELF core header in new kernel, if it has not been passed via
- * the "elfcorehdr" kernel parameter
+ * Free ELF core header (new kernel)
*/
-static int setup_kdump_elfcorehdr(void)
+void elfcorehdr_free(unsigned long long addr)
{
- size_t elfcorebuf_sz;
- char *elfcorebuf;
+ if (!elfcorehdr_newmem)
+ return;
+ kfree((void *)(unsigned long)addr);
+}
- if (!OLDMEM_BASE || is_kdump_kernel())
- return -EINVAL;
- s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
- elfcorehdr_addr = (unsigned long long) elfcorebuf;
- elfcorehdr_size = elfcorebuf_sz;
- return 0;
+/*
+ * Read from ELF header
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+ void *src = (void *)(unsigned long)*ppos;
+
+ src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
+ memcpy(buf, src, count);
+ *ppos += count;
+ return count;
}
-subsys_initcall(setup_kdump_elfcorehdr);
+/*
+ * Read from ELF notes data
+ */
+ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+ void *src = (void *)(unsigned long)*ppos;
+ int rc;
+
+ if (elfcorehdr_newmem) {
+ memcpy(buf, src, count);
+ } else {
+ rc = copy_from_oldmem(buf, src, count);
+ if (rc)
+ return rc;
+ }
+ *ppos += count;
+ return count;
+}
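
A worked example of the HSA boundary split in remap_oldmem_pfn_range_zfcpdump(); the 32 MiB HSA size is an assumed value for illustration, not taken from the patch:

/* Assume ZFCPDUMP_HSA_SIZE = 32 MiB (0x2000000) and PAGE_SHIFT = 12.
 * User space mmaps 8 MiB of /proc/vmcore starting at 28 MiB, so
 *   pfn = 0x1c00 (28 MiB >> 12), size = 0x800000.
 * Since pfn < (0x2000000 >> 12) = 0x2000, the function computes
 *   size_hsa = min(0x800000, 0x2000000 - (0x1c00 << 12)) = 0x400000.
 * The first 4 MiB stay unmapped and are served on demand through
 * copy_oldmem_page_zfcpdump()/memcpy_hsa(); only the 4 MiB above the
 * HSA limit are mapped directly:
 *   remap_pfn_range(vma, from + 0x400000, 0x2000, 0x400000, prot);
 */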
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 87acc38f73c..99e7f603589 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -40,14 +40,15 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
+ unsigned long addr;
while (1) {
sp = sp & PSW_ADDR_INSN;
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
- printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+ addr = sf->gprs[8] & PSW_ADDR_INSN;
+ printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
/* Follow the backchain. */
while (1) {
low = sp;
@@ -57,16 +58,16 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
- printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+ addr = sf->gprs[8] & PSW_ADDR_INSN;
+ printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
- printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+ addr = regs->psw.addr & PSW_ADDR_INSN;
+ printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
low = sp;
sp = regs->gprs[15];
}
@@ -128,8 +129,7 @@ static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+ printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
#endif
}
@@ -143,10 +143,10 @@ void show_registers(struct pt_regs *regs)
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p",
+ printk("%s PSW : %p %p (%pSR)\n",
mode, (void *) regs->psw.mask,
+ (void *) regs->psw.addr,
(void *) regs->psw.addr);
- print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
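
The conversions above lean on the %pSR printk extension, which resolves an address to symbol+offset after treating it as a return address, so one printk() replaces each printk()/print_symbol() pair (sample output is illustrative):

/* Before: two calls per backtrace line. */
printk(" [<%016lx>] ", addr);
print_symbol("%s\n", addr);

/* After: one atomic line, e.g.
 *  " [<000000000012a45e>] do_IRQ+0x5e/0x2a0"
 */
printk(" [<%016lx>] %pSR\n", addr, (void *)addr);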
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 3ddbc26d246..e9b04c33d38 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -53,27 +53,21 @@ void handle_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
void do_notify_resume(struct pt_regs *regs);
-struct ext_code;
-void do_extint(struct pt_regs *regs);
+void __init init_IRQ(void);
+void do_IRQ(struct pt_regs *regs, int irq);
void do_restart(void);
void __init startup_init(void);
void die(struct pt_regs *regs, const char *str);
-
+int setup_profiling_timer(unsigned int multiplier);
void __init time_init(void);
+int pfn_is_nosave(unsigned long);
+void s390_early_resume(void);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
struct s390_mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;
-long sys_mmap2(struct s390_mmap_arg_struct __user *arg);
-long sys_s390_ipc(uint call, int first, unsigned long second,
- unsigned long third, void __user *ptr);
-long sys_s390_personality(unsigned int personality);
-long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
- size_t len, int advice);
-long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
- u32 len_low);
long sys_sigreturn(void);
long sys_rt_sigreturn(void);
long sys32_sigreturn(void);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index e3043aef87a..1014ad5f769 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
+#include "entry.h"
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -177,7 +178,7 @@ int ftrace_enable_ftrace_graph_caller(void)
offset = ((void *) prepare_ftrace_return -
(void *) ftrace_graph_caller) / 2;
- return probe_kernel_write(ftrace_graph_caller + 2,
+ return probe_kernel_write((void *) ftrace_graph_caller + 2,
&offset, sizeof(offset));
}
@@ -185,7 +186,7 @@ int ftrace_disable_ftrace_graph_caller(void)
{
static unsigned short offset = 0x0002;
- return probe_kernel_write(ftrace_graph_caller + 2,
+ return probe_kernel_write((void *) ftrace_graph_caller + 2,
&offset, sizeof(offset));
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b34ba0ea96a..8ac2097f13d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -196,21 +196,23 @@ asmlinkage void do_softirq(void)
* ext_int_hash[index] is the list head for all external interrupts that hash
* to this index.
*/
-static struct list_head ext_int_hash[256];
+static struct hlist_head ext_int_hash[32] ____cacheline_aligned;
struct ext_int_info {
ext_int_handler_t handler;
- u16 code;
- struct list_head entry;
+ struct hlist_node entry;
struct rcu_head rcu;
+ u16 code;
};
/* ext_int_hash_lock protects the handler lists for external interrupts */
-DEFINE_SPINLOCK(ext_int_hash_lock);
+static DEFINE_SPINLOCK(ext_int_hash_lock);
static inline int ext_hash(u16 code)
{
- return (code + (code >> 9)) & 0xff;
+ BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));
+
+ return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
}
int register_external_interrupt(u16 code, ext_int_handler_t handler)
@@ -227,7 +229,7 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
index = ext_hash(code);
spin_lock_irqsave(&ext_int_hash_lock, flags);
- list_add_rcu(&p->entry, &ext_int_hash[index]);
+ hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
@@ -240,9 +242,9 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
int index = ext_hash(code);
spin_lock_irqsave(&ext_int_hash_lock, flags);
- list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+ hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
if (p->code == code && p->handler == handler) {
- list_del_rcu(&p->entry);
+ hlist_del_rcu(&p->entry);
kfree_rcu(p, rcu);
}
}
@@ -264,12 +266,12 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
index = ext_hash(ext_code.code);
rcu_read_lock();
- list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
- if (likely(p->code == ext_code.code))
- p->handler(ext_code, regs->int_parm,
- regs->int_parm_long);
+ hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
+ if (unlikely(p->code != ext_code.code))
+ continue;
+ p->handler(ext_code, regs->int_parm, regs->int_parm_long);
+ }
rcu_read_unlock();
-
return IRQ_HANDLED;
}
@@ -283,55 +285,32 @@ void __init init_ext_interrupts(void)
int idx;
for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
- INIT_LIST_HEAD(&ext_int_hash[idx]);
+ INIT_HLIST_HEAD(&ext_int_hash[idx]);
irq_set_chip_and_handler(EXT_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
setup_irq(EXT_INTERRUPT, &external_interrupt);
}
-static DEFINE_SPINLOCK(sc_irq_lock);
-static int sc_irq_refcount;
-
-void service_subclass_irq_register(void)
-{
- spin_lock(&sc_irq_lock);
- if (!sc_irq_refcount)
- ctl_set_bit(0, 9);
- sc_irq_refcount++;
- spin_unlock(&sc_irq_lock);
-}
-EXPORT_SYMBOL(service_subclass_irq_register);
-
-void service_subclass_irq_unregister(void)
-{
- spin_lock(&sc_irq_lock);
- sc_irq_refcount--;
- if (!sc_irq_refcount)
- ctl_clear_bit(0, 9);
- spin_unlock(&sc_irq_lock);
-}
-EXPORT_SYMBOL(service_subclass_irq_unregister);
-
-static DEFINE_SPINLOCK(ma_subclass_lock);
-static int ma_subclass_refcount;
+static DEFINE_SPINLOCK(irq_subclass_lock);
+static unsigned char irq_subclass_refcount[64];
-void measurement_alert_subclass_register(void)
+void irq_subclass_register(enum irq_subclass subclass)
{
- spin_lock(&ma_subclass_lock);
- if (!ma_subclass_refcount)
- ctl_set_bit(0, 5);
- ma_subclass_refcount++;
- spin_unlock(&ma_subclass_lock);
+ spin_lock(&irq_subclass_lock);
+ if (!irq_subclass_refcount[subclass])
+ ctl_set_bit(0, subclass);
+ irq_subclass_refcount[subclass]++;
+ spin_unlock(&irq_subclass_lock);
}
-EXPORT_SYMBOL(measurement_alert_subclass_register);
+EXPORT_SYMBOL(irq_subclass_register);
-void measurement_alert_subclass_unregister(void)
+void irq_subclass_unregister(enum irq_subclass subclass)
{
- spin_lock(&ma_subclass_lock);
- ma_subclass_refcount--;
- if (!ma_subclass_refcount)
- ctl_clear_bit(0, 5);
- spin_unlock(&ma_subclass_lock);
+ spin_lock(&irq_subclass_lock);
+ irq_subclass_refcount[subclass]--;
+ if (!irq_subclass_refcount[subclass])
+ ctl_clear_bit(0, subclass);
+ spin_unlock(&irq_subclass_lock);
}
-EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+EXPORT_SYMBOL(irq_subclass_unregister);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index adbbe7f1cb0..0ce9fb24503 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -37,6 +37,26 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
+DEFINE_INSN_CACHE_OPS(dmainsn);
+
+static void *alloc_dmainsn_page(void)
+{
+ return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+}
+
+static void free_dmainsn_page(void *page)
+{
+ free_page((unsigned long)page);
+}
+
+struct kprobe_insn_cache kprobe_dmainsn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
+ .alloc = alloc_dmainsn_page,
+ .free = free_dmainsn_page,
+ .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+ .insn_size = MAX_INSN_SIZE,
+};
+
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
switch (insn[0] >> 8) {
@@ -100,9 +120,8 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
- if ((insn[0] & 0x0f) == 0x00 || /* larl */
- (insn[0] & 0x0f) == 0x05) /* brasl */
- fixup |= FIXUP_RETURN_REGISTER;
+ if ((insn[0] & 0x0f) == 0x05) /* brasl */
+ fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
@@ -134,18 +153,128 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
return fixup;
}
+static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
+{
+ /* Check if we have a RIL-b or RIL-c format instruction which
+ * we need to modify in order to avoid instruction emulation. */
+ switch (insn[0] >> 8) {
+ case 0xc0:
+ if ((insn[0] & 0x0f) == 0x00) /* larl */
+ return true;
+ break;
+ case 0xc4:
+ switch (insn[0] & 0x0f) {
+ case 0x02: /* llhrl */
+ case 0x04: /* lghrl */
+ case 0x05: /* lhrl */
+ case 0x06: /* llghrl */
+ case 0x07: /* sthrl */
+ case 0x08: /* lgrl */
+ case 0x0b: /* stgrl */
+ case 0x0c: /* lgfrl */
+ case 0x0d: /* lrl */
+ case 0x0e: /* llgfrl */
+ case 0x0f: /* strl */
+ return true;
+ }
+ break;
+ case 0xc6:
+ switch (insn[0] & 0x0f) {
+ case 0x00: /* exrl */
+ case 0x02: /* pfdrl */
+ case 0x04: /* cghrl */
+ case 0x05: /* chrl */
+ case 0x06: /* clghrl */
+ case 0x07: /* clhrl */
+ case 0x08: /* cgrl */
+ case 0x0a: /* clgrl */
+ case 0x0c: /* cgfrl */
+ case 0x0d: /* crl */
+ case 0x0e: /* clgfrl */
+ case 0x0f: /* clrl */
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+static void __kprobes copy_instruction(struct kprobe *p)
+{
+ s64 disp, new_disp;
+ u64 addr, new_addr;
+
+ memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+ if (!is_insn_relative_long(p->ainsn.insn))
+ return;
+ /*
+ * For pc-relative instructions in RIL-b or RIL-c format, patch the
+ * RI2 displacement field. We have already made sure that the insn
+ * slot for the patched instruction is within the same 2GB area
+ * as the original instruction (either kernel image or module area).
+ * Therefore the new displacement will always fit.
+ */
+ disp = *(s32 *)&p->ainsn.insn[1];
+ addr = (u64)(unsigned long)p->addr;
+ new_addr = (u64)(unsigned long)p->ainsn.insn;
+ new_disp = ((addr + (disp * 2)) - new_addr) / 2;
+ *(s32 *)&p->ainsn.insn[1] = new_disp;
+}
+
+static inline int is_kernel_addr(void *addr)
+{
+ return addr < (void *)_end;
+}
+
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+ BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+ if (addr < (void *)MODULES_VADDR)
+ return 0;
+ if (addr > (void *)MODULES_END)
+ return 0;
+#endif
+ return 1;
+}
+
+static int __kprobes s390_get_insn_slot(struct kprobe *p)
+{
+ /*
+ * Get an insn slot that is within the same 2GB area as the original
+ * instruction. That way instructions with a 32bit signed displacement
+ * field can be patched and executed within the insn slot.
+ */
+ p->ainsn.insn = NULL;
+ if (is_kernel_addr(p->addr))
+ p->ainsn.insn = get_dmainsn_slot();
+ if (is_module_addr(p->addr))
+ p->ainsn.insn = get_insn_slot();
+ return p->ainsn.insn ? 0 : -ENOMEM;
+}
+
+static void __kprobes s390_free_insn_slot(struct kprobe *p)
+{
+ if (!p->ainsn.insn)
+ return;
+ if (is_kernel_addr(p->addr))
+ free_dmainsn_slot(p->ainsn.insn, 0);
+ else
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+}
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x01)
return -EINVAL;
-
/* Make sure the probe isn't going on a difficult instruction */
if (is_prohibited_opcode(p->addr))
return -EINVAL;
-
+ if (s390_get_insn_slot(p))
+ return -ENOMEM;
p->opcode = *p->addr;
- memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
-
+ copy_instruction(p);
return 0;
}
@@ -186,6 +315,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
+ s390_free_insn_slot(p);
}
static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
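
To see why copy_instruction()'s displacement rewrite is safe, here is the arithmetic with assumed addresses. RIL displacements count halfwords, and the slot caches above (GFP_DMA pages for kernel text, the generic insn cache for modules) keep the copy within the same 2 GB region, so new_disp always fits in 32 bits:

/* A probed "larl %r1,target" at addr = 0x100000 with disp = 0x500
 * references target = addr + 2*disp = 0x100a00.
 * Copied to an insn slot at new_addr = 0x180000:
 *   new_disp = (addr + 2*disp - new_addr) / 2
 *            = (0x100a00 - 0x180000) / 2 = -0x3fb00
 * Single-stepping the copy resolves new_addr + 2*new_disp
 *            = 0x180000 - 0x7f600 = 0x100a00, the original target.
 */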
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index ac2178161ec..719e27b2cf2 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -50,7 +50,7 @@ static void add_elf_notes(int cpu)
/*
* Initialize CPU ELF notes
*/
-void setup_regs(void)
+static void setup_regs(void)
{
unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
int cpu, this_cpu;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index fb99c2057b8..1105502bf6e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -274,7 +274,7 @@ static int reserve_pmc_hardware(void)
int flags = PMC_INIT;
on_each_cpu(setup_pmc_cpu, &flags, 1);
- measurement_alert_subclass_register();
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
@@ -285,7 +285,7 @@ static void release_pmc_hardware(void)
int flags = PMC_RELEASE;
on_each_cpu(setup_pmc_cpu, &flags, 1);
- measurement_alert_subclass_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}
/* Release the PMU if event is the last perf event */
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 500aa1029bc..2343c218b8f 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -105,13 +105,10 @@ void perf_event_print_debug(void)
cpu = smp_processor_id();
memset(&cf_info, 0, sizeof(cf_info));
- if (!qctri(&cf_info)) {
+ if (!qctri(&cf_info))
pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
cpu, cf_info.cfvn, cf_info.csvn,
cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
- print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
- &cf_info, sizeof(cf_info));
- }
local_irq_restore(flags);
}
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 077a99389b0..e1c9d1c292f 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -139,10 +139,10 @@ static int __init runtime_instr_init(void)
if (!runtime_instr_avail())
return 0;
- measurement_alert_subclass_register();
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
if (rc)
- measurement_alert_subclass_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
else
pr_info("Runtime instrumentation facility initialized\n");
return rc;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d386c4e9d2e..1a4313a1b60 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -362,7 +362,7 @@ void smp_yield_cpu(int cpu)
* Send cpus emergency shutdown signal. This gives the cpus the
* opportunity to complete outstanding interrupts.
*/
-void smp_emergency_stop(cpumask_t *cpumask)
+static void smp_emergency_stop(cpumask_t *cpumask)
{
u64 end;
int cpu;
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 737bff38e3e..a7a7537ce1e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -13,6 +13,7 @@
#include <asm/ipl.h>
#include <asm/cio.h>
#include <asm/pci.h>
+#include "entry.h"
/*
* References to section boundaries
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index f00aefb66a4..7de4469915f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -673,7 +673,7 @@ static int __init pfault_irq_init(void)
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 248445f9260..d261c62e40a 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -223,6 +223,11 @@ int pud_huge(pud_t pud)
return 0;
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmdp, int write)
{
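
pmd_huge_support() is a per-architecture capability hook added across all hugetlb arches in this series (sh, sparc, tile and x86 follow below). A hedged sketch of the generic consumer it appears to enable; treat the exact helper body as an assumption rather than a quote from this patch set:

/* Sketch: hugepage migration is only attempted when the architecture
 * can address huge pages at PMD level and the hstate is PMD-sized.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}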
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 921fa541dc0..d1e0e0c7a7e 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -14,6 +14,7 @@
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
+#include <asm/io.h>
/*
* This function writes to kernel memory bypassing DAT and possible
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index bf7c0dc64a7..de8cbc30dcd 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -245,7 +245,9 @@ EXPORT_SYMBOL_GPL(gmap_disable);
* gmap_alloc_table is assumed to be called with mmap_sem held
*/
static int gmap_alloc_table(struct gmap *gmap,
- unsigned long *table, unsigned long init)
+ unsigned long *table, unsigned long init)
+ __releases(&gmap->mm->page_table_lock)
+ __acquires(&gmap->mm->page_table_lock)
{
struct page *page;
unsigned long *new;
@@ -966,7 +968,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
tlb_remove_table(tlb, table);
}
-void __tlb_remove_table(void *_table)
+static void __tlb_remove_table(void *_table)
{
const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
void *table = (void *)((unsigned long) _table & ~mask);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d5f10a43a58..70923928586 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -805,7 +805,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
return NULL;
memset(header, 0, sz);
header->pages = sz / PAGE_SIZE;
- hole = sz - bpfsize + sizeof(*header);
+ hole = sz - (bpfsize + sizeof(*header));
/* Insert random number of illegal instructions before BPF code
* and make sure the first instruction starts at an even address.
*/
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index b5b2916895e..231cecafc2f 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1001,7 +1001,7 @@ int hwsampler_deallocate(void)
if (hws_state != HWS_STOPPED)
goto deallocate_exit;
- measurement_alert_subclass_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
deallocate_sdbt();
hws_state = HWS_DEALLOCATED;
@@ -1115,7 +1115,7 @@ int hwsampler_shutdown(void)
mutex_lock(&hws_sem);
if (hws_state == HWS_STOPPED) {
- measurement_alert_subclass_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
deallocate_sdbt();
}
if (hws_wq) {
@@ -1190,7 +1190,7 @@ start_all_exit:
hws_oom = 1;
hws_flush_all = 0;
/* now let them in, 1407 CPUMF external interrupts */
- measurement_alert_subclass_register();
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 65dd81baa7f..1fa8be40977 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -600,37 +600,13 @@ static struct platform_device sdhi0_power = {
},
};
-static void sdhi0_set_pwr(struct platform_device *pdev, int state)
-{
- static int power_gpio = -EINVAL;
-
- if (power_gpio < 0) {
- int ret = gpio_request(GPIO_PTB6, NULL);
- if (!ret) {
- power_gpio = GPIO_PTB6;
- gpio_direction_output(power_gpio, 0);
- }
- }
-
- /*
- * Toggle the GPIO regardless, whether we managed to grab it above or
- * the fixed regulator driver did.
- */
- gpio_set_value(GPIO_PTB6, state);
-}
-
-static int sdhi0_get_cd(struct platform_device *pdev)
-{
- return !gpio_get_value(GPIO_PTY7);
-}
-
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
- .set_pwr = sdhi0_set_pwr,
.tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_NEEDS_POLL,
- .get_cd = sdhi0_get_cd,
+ .tmio_flags = TMIO_MMC_USE_GPIO_CD,
+ .cd_gpio = GPIO_PTY7,
};
static struct resource sdhi0_resources[] = {
@@ -656,39 +632,15 @@ static struct platform_device sdhi0_device = {
},
};
-static void cn12_set_pwr(struct platform_device *pdev, int state)
-{
- static int power_gpio = -EINVAL;
-
- if (power_gpio < 0) {
- int ret = gpio_request(GPIO_PTB7, NULL);
- if (!ret) {
- power_gpio = GPIO_PTB7;
- gpio_direction_output(power_gpio, 0);
- }
- }
-
- /*
- * Toggle the GPIO regardless, whether we managed to grab it above or
- * the fixed regulator driver did.
- */
- gpio_set_value(GPIO_PTB7, state);
-}
-
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SDHI1 */
-static int sdhi1_get_cd(struct platform_device *pdev)
-{
- return !gpio_get_value(GPIO_PTW7);
-}
-
static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_NEEDS_POLL,
- .set_pwr = cn12_set_pwr,
- .get_cd = sdhi1_get_cd,
+ .tmio_flags = TMIO_MMC_USE_GPIO_CD,
+ .cd_gpio = GPIO_PTW7,
};
static struct resource sdhi1_resources[] = {
@@ -718,27 +670,19 @@ static struct platform_device sdhi1_device = {
#else
/* MMC SPI */
-static int mmc_spi_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_PTY6);
-}
-
-static int mmc_spi_get_cd(struct device *dev)
-{
- return !gpio_get_value(GPIO_PTY7);
-}
-
static void mmc_spi_setpower(struct device *dev, unsigned int maskval)
{
gpio_set_value(GPIO_PTB6, maskval ? 1 : 0);
}
static struct mmc_spi_platform_data mmc_spi_info = {
- .get_ro = mmc_spi_get_ro,
- .get_cd = mmc_spi_get_cd,
.caps = MMC_CAP_NEEDS_POLL,
+ .caps2 = MMC_CAP2_RO_ACTIVE_HIGH,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */
.setpower = mmc_spi_setpower,
+ .flags = MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
+ .cd_gpio = GPIO_PTY7,
+ .ro_gpio = GPIO_PTY6,
};
static struct spi_board_info spi_bus[] = {
@@ -998,11 +942,6 @@ static struct platform_device vou_device = {
#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SH_MMCIF */
-static void mmcif_down_pwr(struct platform_device *pdev)
-{
- cn12_set_pwr(pdev, 0);
-}
-
static struct resource sh_mmcif_resources[] = {
[0] = {
.name = "SH_MMCIF",
@@ -1023,8 +962,6 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
- .set_pwr = cn12_set_pwr,
- .down_pwr = mmcif_down_pwr,
.sup_pclk = 0, /* SH7724: Max Pclk/2 */
.caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_8_BIT_DATA |
@@ -1341,10 +1278,6 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR6);
/* SD-card slot CN11 */
- /* Card-detect, used on CN11, either with SDHI0 or with SPI */
- gpio_request(GPIO_PTY7, NULL);
- gpio_direction_input(GPIO_PTY7);
-
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0WP, NULL);
@@ -1363,8 +1296,6 @@ static int __init arch_setup(void)
gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
- gpio_request(GPIO_PTY6, NULL); /* write protect */
- gpio_direction_input(GPIO_PTY6);
spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
#endif
@@ -1394,10 +1325,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_SDHI1D1, NULL);
gpio_request(GPIO_FN_SDHI1D0, NULL);
- /* Card-detect, used on CN12 with SDHI1 */
- gpio_request(GPIO_PTW7, NULL);
- gpio_direction_input(GPIO_PTW7);
-
cn12_enabled = true;
#endif
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index d7762349ea4..0d676a41081 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -83,6 +83,11 @@ int pud_huge(pud_t pud)
return 0;
}
+int pmd_huge_support(void)
+{
+ return 0;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 3d0ddbc005f..71368850dfc 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -169,10 +169,10 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
- ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
+ ret |= copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
sigset_from_compat(&new_ka.sa.sa_mask, &set32);
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- ret |= __get_user(u_restorer, &act->sa_restorer);
+ ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
if (ret)
return -EFAULT;
@@ -183,9 +183,9 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
if (!ret && oact) {
sigset_to_compat(&set32, &old_ka.sa.sa_mask);
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
- ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+ ret |= copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
+ ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
ret = -EFAULT;
}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d2b59441ebd..96399646570 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -234,6 +234,11 @@ int pud_huge(pud_t pud)
return 0;
}
+int pmd_huge_support(void)
+{
+ return 0;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index e514899e110..0cb3bbaa580 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,6 +166,11 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
+
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3a16c1483b4..64507f35800 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -3,18 +3,23 @@
#ifdef __KERNEL__
+#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/nops.h>
#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 5
-#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#ifdef CONFIG_X86_64
+# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
+#else
+# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
+#endif
static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:"
- STATIC_KEY_INITIAL_NOP
+ ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8d16befdec8..3d199945870 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -315,21 +315,6 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
- return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
- return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
- return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
-}
-
static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
@@ -446,6 +431,7 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
#include <linux/log2.h>
static inline int pte_none(pte_t pte)
@@ -864,6 +850,24 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
{
}
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present(pte));
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present(pte));
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present(pte));
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
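
Moving the three swap soft-dirty helpers below the VM_BUG_ON()-capable includes lets them assert their one precondition: _PAGE_SWP_SOFT_DIRTY borrows _PAGE_PSE, so it is only meaningful on non-present PTEs. A hedged sketch of the intended call pattern (modelled on the unmap path; names are illustrative):

/* Convert a present, soft-dirty PTE into its swap form without losing
 * the soft-dirty state. pteval is the old present PTE, entry the swap
 * entry that replaces it.
 */
static void set_swap_pte_soft_dirty(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pteval, swp_entry_t entry)
{
	pte_t swp_pte = swp_entry_to_pte(entry);

	if (pte_soft_dirty(pteval))		/* present-PTE flavour */
		swp_pte = pte_swp_mksoft_dirty(swp_pte); /* swap flavour */
	set_pte_at(mm, addr, ptep, swp_pte);
}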
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f4843e03113..0ecac257fb2 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -75,6 +75,9 @@
* with swap entry format. On x86 bits 6 and 7 are *not* involved
* into swap entry computation, but bit 6 is used for nonlinear
* file mapping, so we borrow bit 7 for soft dirty tracking.
+ *
+ * Please note that this bit must be treated as the swap soft-dirty page
+ * mark if and only if the PTE has its present bit clear!
*/
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cf512003e66..e6d90babc24 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,6 +62,7 @@ static inline void __flush_tlb_all(void)
static inline void __flush_tlb_one(unsigned long addr)
{
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -84,14 +85,38 @@ static inline void __flush_tlb_one(unsigned long addr)
#ifndef CONFIG_SMP
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions. *Not* intended to be called
+ * directly. All global TLB flushes must either call this or bump the
+ * VM statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+ __flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+ __flush_tlb_up();
+}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
- __flush_tlb();
+ __flush_tlb_up();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -105,14 +130,14 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
- __flush_tlb();
+ __flush_tlb_up();
}
static inline void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end, unsigned long vmflag)
{
if (mm == current->active_mm)
- __flush_tlb();
+ __flush_tlb_up();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index d4cdfa67509..ce2d0a2c3e4 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,6 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Save MTRR state */
@@ -696,6 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 69eb2fa2549..376dc787344 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -52,8 +52,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 63bdb29b254..b3cd3ebae07 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci_ids.h>
+#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
@@ -216,6 +217,157 @@ static void __init intel_remapping_check(int num, int slot, int func)
}
+/*
+ * Systems with Intel graphics controllers set aside memory exclusively
+ * for gfx driver use. This memory is not marked in the E820 as reserved
+ * or as RAM, and so is subject to overlap from E820 manipulation later
+ * in the boot process. On some systems, MMIO space is allocated on top,
+ * despite the efforts of the "RAM buffer" approach, which simply rounds
+ * memory boundaries up to 64M to try to catch space that may decode
+ * as RAM and so is not suitable for MMIO.
+ *
+ * And yes, so far on current devices the base addr is always under 4G.
+ */
+static u32 __init intel_stolen_base(int num, int slot, int func)
+{
+ u32 base;
+
+ /*
+ * For the PCI IDs in this quirk, the stolen base is always
+ * in 0x5c, aka the BDSM register (yes that's really what
+ * it's called).
+ */
+ base = read_pci_config(num, slot, func, 0x5c);
+ base &= ~((1<<20) - 1);
+
+ return base;
+}
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+static size_t __init gen3_stolen_size(int num, int slot, int func)
+{
+ size_t stolen_size;
+ u16 gmch_ctrl;
+
+ gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ stolen_size = MB(1);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ stolen_size = MB(4);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ stolen_size = MB(8);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ stolen_size = MB(16);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ stolen_size = MB(48);
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+
+ return stolen_size;
+}
+
+static size_t __init gen6_stolen_size(int num, int slot, int func)
+{
+ u16 gmch_ctrl;
+
+ gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+ gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GMS_MASK;
+
+ return gmch_ctrl << 25; /* 32 MB units */
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+static struct pci_device_id intel_stolen_ids[] __initdata = {
+ INTEL_I915G_IDS(gen3_stolen_size),
+ INTEL_I915GM_IDS(gen3_stolen_size),
+ INTEL_I945G_IDS(gen3_stolen_size),
+ INTEL_I945GM_IDS(gen3_stolen_size),
+ INTEL_VLV_M_IDS(gen3_stolen_size),
+ INTEL_VLV_D_IDS(gen3_stolen_size),
+ INTEL_PINEVIEW_IDS(gen3_stolen_size),
+ INTEL_I965G_IDS(gen3_stolen_size),
+ INTEL_G33_IDS(gen3_stolen_size),
+ INTEL_I965GM_IDS(gen3_stolen_size),
+ INTEL_GM45_IDS(gen3_stolen_size),
+ INTEL_G45_IDS(gen3_stolen_size),
+ INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
+ INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
+ INTEL_SNB_D_IDS(gen6_stolen_size),
+ INTEL_SNB_M_IDS(gen6_stolen_size),
+ INTEL_IVB_M_IDS(gen6_stolen_size),
+ INTEL_IVB_D_IDS(gen6_stolen_size),
+ INTEL_HSW_D_IDS(gen6_stolen_size),
+ INTEL_HSW_M_IDS(gen6_stolen_size),
+};
+
+static void __init intel_graphics_stolen(int num, int slot, int func)
+{
+ size_t size;
+ int i;
+ u32 start;
+ u16 device, subvendor, subdevice;
+
+ device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+ subvendor = read_pci_config_16(num, slot, func,
+ PCI_SUBSYSTEM_VENDOR_ID);
+ subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
+
+ for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
+ if (intel_stolen_ids[i].device == device) {
+ stolen_size_fn stolen_size =
+ (stolen_size_fn)intel_stolen_ids[i].driver_data;
+ size = stolen_size(num, slot, func);
+ start = intel_stolen_base(num, slot, func);
+ if (size && start) {
+ /* Mark this space as reserved */
+ e820_add_region(start, size, E820_RESERVED);
+ sanitize_e820_map(e820.map,
+ ARRAY_SIZE(e820.map),
+ &e820.nr_map);
+ }
+ return;
+ }
+ }
+}
+
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -251,6 +403,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
+ QFLAG_APPLY_ONCE, intel_graphics_stolen },
{}
};
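
Two small worked examples for the decode helpers above (register values assumed for illustration):

/* gen6_stolen_size(): the GMS field counts 32 MB units, and since
 * 32 MB = 2^25, the shift converts units to bytes:
 *   gmch_ctrl = 2  ->  2 << 25 = 0x4000000 = 64 MB of stolen memory.
 *
 * intel_stolen_base(): the BDSM register is masked down to 1 MB
 * alignment, so only bits 20 and up survive:
 *   0xcf80abcd & ~((1 << 20) - 1) = 0xcf800000.
 */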
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 460f5d9ceeb..ee11b7dfbfb 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -24,18 +24,57 @@ union jump_code_union {
} __attribute__((packed));
};
+static void bug_at(unsigned char *ip, int line)
+{
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
+ ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+ BUG();
+}
+
static void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t))
+ void *(*poker)(void *, const void *, size_t),
+ int init)
{
union jump_code_union code;
+ const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
if (type == JUMP_LABEL_ENABLE) {
+ /*
+ * We are enabling this jump label. If it is not a nop
+ * then something must have gone wrong.
+ */
+ if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
+ bug_at((void *)entry->code, __LINE__);
+
code.jump = 0xe9;
code.offset = entry->target -
(entry->code + JUMP_LABEL_NOP_SIZE);
- } else
+ } else {
+ /*
+ * We are disabling this jump label. If it is not what
+ * we think it is, then something must have gone wrong.
+ * If this is the first initialization call, then we
+ * are converting the default nop to the ideal nop.
+ */
+ if (init) {
+ const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
+ if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
+ bug_at((void *)entry->code, __LINE__);
+ } else {
+ code.jump = 0xe9;
+ code.offset = entry->target -
+ (entry->code + JUMP_LABEL_NOP_SIZE);
+ if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
+ bug_at((void *)entry->code, __LINE__);
+ }
memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+ }
/*
* Make text_poke_bp() a default fallback poker.
@@ -57,15 +96,38 @@ void arch_jump_label_transform(struct jump_entry *entry,
{
get_online_cpus();
mutex_lock(&text_mutex);
- __jump_label_transform(entry, type, NULL);
+ __jump_label_transform(entry, type, NULL, 0);
mutex_unlock(&text_mutex);
put_online_cpus();
}
+static enum {
+ JL_STATE_START,
+ JL_STATE_NO_UPDATE,
+ JL_STATE_UPDATE,
+} jlstate __initdata_or_module = JL_STATE_START;
+
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- __jump_label_transform(entry, type, text_poke_early);
+ /*
+ * This function is called at boot up and when modules are
+ * first loaded. Check if the default nop, the one that is
+ * inserted at compile time, is the ideal nop. If it is, then
+ * we do not need to update the nop, and we can leave it as is.
+ * If it is not, then we need to update the nop to the ideal nop.
+ */
+ if (jlstate == JL_STATE_START) {
+ const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
+ const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+
+ if (memcmp(ideal_nop, default_nop, 5) != 0)
+ jlstate = JL_STATE_UPDATE;
+ else
+ jlstate = JL_STATE_NO_UPDATE;
+ }
+ if (jlstate == JL_STATE_UPDATE)
+ __jump_label_transform(entry, type, text_poke_early, 1);
}
#endif
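
For reference, the enabled form written by __jump_label_transform() is a 5-byte near jump: opcode 0xe9 followed by a little-endian rel32 measured from the end of the instruction (addresses below are assumed):

/* code.offset = entry->target - (entry->code + JUMP_LABEL_NOP_SIZE)
 * e.g. entry->code   = 0xffffffff81000100
 *      entry->target = 0xffffffff81000180
 *      offset = 0x180 - (0x100 + 5) = 0x7b
 * bytes at entry->code: e9 7b 00 00 00
 * The memcmp() checks compare exactly these 5 bytes against the
 * ideal nop, the default nop, or the expected jump before patching.
 */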
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 7e73e8c6909..9d980d88b74 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -59,6 +59,10 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
+int pmd_huge_support(void)
+{
+ return 0;
+}
#else
struct page *
@@ -77,6 +81,10 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_PSE);
}
+int pmd_huge_support(void)
+{
+ return 1;
+}
#endif
/* x86_64 also uses this file */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 282375f13c7..ae699b3bbac 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,6 +103,7 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
+ count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (f->flush_end == TLB_FLUSH_ALL)
local_flush_tlb();
@@ -130,6 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
info.flush_start = start;
info.flush_end = end;
+ count_vm_event(NR_TLB_REMOTE_FLUSH);
if (is_uv_system()) {
unsigned int cpu;
@@ -149,6 +151,7 @@ void flush_tlb_current_task(void)
preempt_disable();
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -211,16 +214,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
/* tlb_flushall_shift is on balance point, details in commit log */
- if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+ if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
- else {
+ } else {
if (has_large_page(mm, start, end)) {
local_flush_tlb();
goto flush_all;
}
/* flush range by one by one 'invlpg' */
- for (addr = start; addr < end; addr += PAGE_SIZE)
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
+ }
if (cpumask_any_but(mm_cpumask(mm),
smp_processor_id()) < nr_cpu_ids)
@@ -256,6 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
static void do_flush_tlb_all(void *info)
{
+ count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(smp_processor_id());
@@ -263,6 +270,7 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
+ count_vm_event(NR_TLB_REMOTE_FLUSH);
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
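To make the flush_tlb_mm_range heuristic above concrete: the range is invalidated page by page with invlpg (one NR_TLB_LOCAL_FLUSH_ONE event per page) only while it stays at or below act_entries >> tlb_flushall_shift pages; beyond that a single full flush wins. A toy rendition with invented numbers (real act_entries and shift values are CPU-model specific):

#include <stdio.h>

int main(void)
{
	/* Invented values; the kernel derives these from the CPU */
	unsigned long act_entries = 512, tlb_flushall_shift = 6;
	unsigned long pages = 16;

	if (pages > (act_entries >> tlb_flushall_shift))
		puts("one full flush, NR_TLB_LOCAL_FLUSH_ALL++");
	else
		printf("%lu x invlpg, NR_TLB_LOCAL_FLUSH_ONE += %lu\n",
		       pages, pages);
	return 0;
}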
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fc216dfbd9..fa6ade76ef3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1692,7 +1692,6 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
case CPU_UP_PREPARE:
xen_vcpu_setup(cpu);
if (xen_have_vector_callback) {
- xen_init_lock_cpu(cpu);
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 0d4ec35895d..8b901e8d782 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -990,10 +990,13 @@ int m2p_remove_override(struct page *page,
printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
+ put_balloon_scratch_page();
return -1;
}
- mcs = xen_mc_entry(
+ xen_mc_batch();
+
+ mcs = __xen_mc_entry(
sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr;
@@ -1003,12 +1006,11 @@ int m2p_remove_override(struct page *page,
MULTI_grant_table_op(mcs.mc,
GNTTABOP_unmap_and_replace, unmap_op, 1);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, scratch_page_address,
- pfn_pte(page_to_pfn(get_balloon_scratch_page()),
+ pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0);
+
xen_mc_issue(PARAVIRT_LAZY_MMU);
kmap_op->host_addr = 0;
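The fix above follows the standard multicall batching idiom: open a batch, queue every entry, and issue exactly once, so the grant-table unmap and the scratch-page remap are flushed together rather than in two separately issued batches. A toy model of that ordering (stand-in names; the real helpers queue Xen hypercalls):

#include <stdio.h>

#define MAX_MC 8
static const char *batch[MAX_MC];
static int nbatch;

static void xen_mc_batch(void)           { nbatch = 0; }
static void xen_mc_entry(const char *op) { batch[nbatch++] = op; }
static void xen_mc_issue(void)
{
	printf("issuing %d ops in one batch:\n", nbatch);
	for (int i = 0; i < nbatch; i++)
		printf("  %s\n", batch[i]);
	nbatch = 0;
}

int main(void)
{
	xen_mc_batch();
	xen_mc_entry("GNTTABOP_unmap_and_replace");
	xen_mc_entry("update_va_mapping(scratch_page)");
	xen_mc_issue();		/* one flush, not one per entry */
	return 0;
}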
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 9235842cd76..d1e4777b4e7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -273,12 +273,20 @@ static void __init xen_smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu();
- /* We've switched to the "real" per-cpu gdt, so make sure the
- old memory can be recycled */
- make_lowmem_page_readwrite(xen_initial_gdt);
+ if (xen_pv_domain()) {
+ /* We've switched to the "real" per-cpu gdt, so make sure the
+ old memory can be recycled */
+ make_lowmem_page_readwrite(xen_initial_gdt);
- xen_filter_cpu_maps();
- xen_setup_vcpu_info_placement();
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+ }
+ /*
+ * The alternative logic (which patches the unlock/lock) runs before
+ * the SMP bootup code is activated. Hence we need to set this up
+ * before the core kernel is patched. Otherwise we will have only
+ * modules patched but not core code.
+ */
xen_init_spinlocks();
}
@@ -709,6 +717,15 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
WARN_ON(rc);
if (!rc)
rc = native_cpu_up(cpu, tidle);
+
+ /*
+ * We must initialize the slowpath CPU kicker _after_ the native
+ * path has executed. If we initialized it earlier, none of the
+ * unlocker IPI kicks would reach the booting CPU, as the booting
+ * CPU has not yet set itself 'online' in cpu_online_mask. That mask
+ * is checked when IPIs are sent (on HVM at least).
+ */
+ xen_init_lock_cpu(cpu);
return rc;
}
@@ -728,4 +745,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.cpu_die = xen_hvm_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+ smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0438b9324a7..253f63fceea 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -81,7 +81,6 @@ static inline void spin_time_accum_blocked(u64 start)
spinlock_stats.time_blocked += delta;
}
#else /* !CONFIG_XEN_DEBUG_FS */
-#define TIMEOUT (1 << 10)
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}
@@ -96,23 +95,6 @@ static inline void spin_time_accum_blocked(u64 start)
}
#endif /* CONFIG_XEN_DEBUG_FS */
-/*
- * Size struct xen_spinlock so it's the same as arch_spinlock_t.
- */
-#if NR_CPUS < 256
-typedef u8 xen_spinners_t;
-# define inc_spinners(xl) \
- asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
- asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
-#else
-typedef u16 xen_spinners_t;
-# define inc_spinners(xl) \
- asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
- asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
-#endif
-
struct xen_lock_waiting {
struct arch_spinlock *lock;
__ticket_t want;
@@ -123,6 +105,7 @@ static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
+static bool xen_pvspin = true;
static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
@@ -241,16 +224,12 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
+ if (!xen_pvspin)
+ return;
+
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));
- /*
- * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
- * (xen: disable PV spinlocks on HVM)
- */
- if (xen_hvm_domain())
- return;
-
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
@@ -270,11 +249,7 @@ void xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
- /*
- * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
- * (xen: disable PV spinlocks on HVM)
- */
- if (xen_hvm_domain())
+ if (!xen_pvspin)
return;
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
@@ -283,16 +258,9 @@ void xen_uninit_lock_cpu(int cpu)
per_cpu(irq_name, cpu) = NULL;
}
-static bool xen_pvspin __initdata = true;
void __init xen_init_spinlocks(void)
{
- /*
- * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
- * (xen: disable PV spinlocks on HVM)
- */
- if (xen_hvm_domain())
- return;
if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
@@ -323,6 +291,9 @@ static int __init xen_spinlock_debugfs(void)
if (d_xen == NULL)
return -ENOMEM;
+ if (!xen_pvspin)
+ return 0;
+
d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 42a8bba0b0e..101012bc1ff 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -170,8 +170,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (void *)__va(start);
initrd_end = (void *)__va(end);
diff --git a/block/Kconfig b/block/Kconfig
index a7e40a7c821..7f38e40fee0 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,6 +99,12 @@ config BLK_DEV_THROTTLING
See Documentation/cgroups/blkio-controller.txt for more information.
+config CMDLINE_PARSER
+ bool "Block device command line partition parser"
+ default n
+ ---help---
+ Parse the kernel command line to obtain block device partition information.
+
menu "Partition Types"
source "block/partitions/Kconfig"
diff --git a/block/Makefile b/block/Makefile
index 39b76ba66ff..4fa4be544ec 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
+obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 4464c823cff..46cd7bd18b3 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -367,7 +367,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
if (!icq)
return NULL;
- if (radix_tree_preload(gfp_mask) < 0) {
+ if (radix_tree_maybe_preload(gfp_mask) < 0) {
kmem_cache_free(et->icq_cache, icq);
return NULL;
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5efc5a64718..3aa5b195f4d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -29,7 +29,7 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
int err;
unsigned long v;
- err = strict_strtoul(page, 10, &v);
+ err = kstrtoul(page, 10, &v);
if (err || v > UINT_MAX)
return -EINVAL;
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
new file mode 100644
index 00000000000..cc2637f8674
--- /dev/null
+++ b/block/cmdline-parser.c
@@ -0,0 +1,250 @@
+/*
+ * Parse the kernel command line and extract partition information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/cmdline-parser.h>
+
+static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+{
+ int ret = 0;
+ struct cmdline_subpart *new_subpart;
+
+ *subpart = NULL;
+
+ new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL);
+ if (!new_subpart)
+ return -ENOMEM;
+
+ if (*partdef == '-') {
+ new_subpart->size = (sector_t)(~0ULL);
+ partdef++;
+ } else {
+ new_subpart->size = (sector_t)memparse(partdef, &partdef);
+ if (new_subpart->size < (sector_t)PAGE_SIZE) {
+ pr_warn("cmdline partition size is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (*partdef == '@') {
+ partdef++;
+ new_subpart->from = (sector_t)memparse(partdef, &partdef);
+ } else {
+ new_subpart->from = (sector_t)(~0ULL);
+ }
+
+ if (*partdef == '(') {
+ int length;
+ char *next = strchr(++partdef, ')');
+
+ if (!next) {
+ pr_warn("cmdline partition format is invalid.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ length = min_t(int, next - partdef,
+ sizeof(new_subpart->name) - 1);
+ strncpy(new_subpart->name, partdef, length);
+ new_subpart->name[length] = '\0';
+
+ partdef = ++next;
+ } else
+ new_subpart->name[0] = '\0';
+
+ new_subpart->flags = 0;
+
+ if (!strncmp(partdef, "ro", 2)) {
+ new_subpart->flags |= PF_RDONLY;
+ partdef += 2;
+ }
+
+ if (!strncmp(partdef, "lk", 2)) {
+ new_subpart->flags |= PF_POWERUP_LOCK;
+ partdef += 2;
+ }
+
+ *subpart = new_subpart;
+ return 0;
+fail:
+ kfree(new_subpart);
+ return ret;
+}
+
+static void free_subpart(struct cmdline_parts *parts)
+{
+ struct cmdline_subpart *subpart;
+
+ while (parts->subpart) {
+ subpart = parts->subpart;
+ parts->subpart = subpart->next_subpart;
+ kfree(subpart);
+ }
+}
+
+static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+{
+ int ret = -EINVAL;
+ char *next;
+ int length;
+ struct cmdline_subpart **next_subpart;
+ struct cmdline_parts *newparts;
+ char buf[BDEVNAME_SIZE + 32 + 4];
+
+ *parts = NULL;
+
+ newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL);
+ if (!newparts)
+ return -ENOMEM;
+
+ next = strchr(bdevdef, ':');
+ if (!next) {
+ pr_warn("cmdline partition has no block device.");
+ goto fail;
+ }
+
+ length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+ strncpy(newparts->name, bdevdef, length);
+ newparts->name[length] = '\0';
+ newparts->nr_subparts = 0;
+
+ next_subpart = &newparts->subpart;
+
+ while (next && *(++next)) {
+ bdevdef = next;
+ next = strchr(bdevdef, ',');
+
+ length = (!next) ? (sizeof(buf) - 1) :
+ min_t(int, next - bdevdef, sizeof(buf) - 1);
+
+ strncpy(buf, bdevdef, length);
+ buf[length] = '\0';
+
+ ret = parse_subpart(next_subpart, buf);
+ if (ret)
+ goto fail;
+
+ newparts->nr_subparts++;
+ next_subpart = &(*next_subpart)->next_subpart;
+ }
+
+ if (!newparts->subpart) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *parts = newparts;
+
+ return 0;
+fail:
+ free_subpart(newparts);
+ kfree(newparts);
+ return ret;
+}
+
+void cmdline_parts_free(struct cmdline_parts **parts)
+{
+ struct cmdline_parts *next_parts;
+
+ while (*parts) {
+ next_parts = (*parts)->next_parts;
+ free_subpart(*parts);
+ kfree(*parts);
+ *parts = next_parts;
+ }
+}
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
+{
+ int ret;
+ char *buf;
+ char *pbuf;
+ char *next;
+ struct cmdline_parts **next_parts;
+
+ *parts = NULL;
+
+ next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ next_parts = parts;
+
+ while (next && *pbuf) {
+ next = strchr(pbuf, ';');
+ if (next)
+ *next = '\0';
+
+ ret = parse_parts(next_parts, pbuf);
+ if (ret)
+ goto fail;
+
+ if (next)
+ pbuf = ++next;
+
+ next_parts = &(*next_parts)->next_parts;
+ }
+
+ if (!*parts) {
+ pr_warn("cmdline partition has no valid partition.");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = 0;
+done:
+ kfree(buf);
+ return ret;
+
+fail:
+ cmdline_parts_free(parts);
+ goto done;
+}
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev)
+{
+ while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
+ parts = parts->next_parts;
+ return parts;
+}
+
+/*
+ * add_part() return values:
+ * 0 - success.
+ * 1 - cannot add any more partitions.
+ */
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param)
+
+{
+ sector_t from = 0;
+ struct cmdline_subpart *subpart;
+
+ for (subpart = parts->subpart; subpart;
+ subpart = subpart->next_subpart, slot++) {
+ if (subpart->from == (sector_t)(~0ULL))
+ subpart->from = from;
+ else
+ from = subpart->from;
+
+ if (from >= disk_size)
+ break;
+
+ if (subpart->size > (disk_size - from))
+ subpart->size = disk_size - from;
+
+ from += subpart->size;
+
+ if (add_part(slot, subpart, param))
+ break;
+ }
+}
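For orientation, a schematic in-kernel consumer of this API, modelled on the partitions/cmdline.c user added later in this patch (the blkdevparts string and the disk size are invented, error handling is trimmed, and the usual module boilerplate is omitted):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cmdline-parser.h>

static int show_part(int slot, struct cmdline_subpart *subpart, void *param)
{
	pr_info("slot %d: '%s' from %llu size %llu\n", slot, subpart->name,
		(unsigned long long)subpart->from,
		(unsigned long long)subpart->size);
	return 0;	/* non-zero stops cmdline_parts_set() early */
}

static int __init cmdline_parts_demo(void)
{
	struct cmdline_parts *parts, *p;

	if (cmdline_parts_parse(&parts, "mmcblk0:1M(boot),512M(root),-(data)"))
		return -EINVAL;

	p = cmdline_parts_find(parts, "mmcblk0");
	if (p)		/* disk size in bytes, as in partitions/cmdline.c */
		cmdline_parts_set(p, 1ULL << 33, 1, show_part, NULL);

	cmdline_parts_free(&parts);
	return 0;
}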
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7e5d474dc6b..fbd5a67cb77 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -70,7 +70,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
return ret;
ret = copy_to_user(ugeo, &geo, 4);
- ret |= __put_user(geo.start, &ugeo->start);
+ ret |= put_user(geo.start, &ugeo->start);
if (ret)
ret = -EFAULT;
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 4cebb2f0d2f..87a32086535 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -260,3 +260,10 @@ config SYSV68_PARTITION
partition table format used by Motorola Delta machines (using
sysv68).
Otherwise, say N.
+
+config CMDLINE_PARTITION
+ bool "Command line partition support" if PARTITION_ADVANCED
+ select CMDLINE_PARSER
+ help
+ Say Y here if you want to read the partition table from the boot arguments.
+ The format for the command line is just like mtdparts.
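As a concrete illustration (device and partition names invented), a matching boot argument for the parser above could look like:

	blkdevparts=mmcblk0:1M(boot)ro,512M(rootfs),-(userdata);mmcblk1:-(sdcard)

Per parse_subpart() above, sizes take the usual memparse suffixes (K, M, G), '-' claims the remaining space, '@<offset>' pins an explicit start, the parenthesized name becomes the volume label, and the trailing 'ro' and 'lk' flags mark a partition read-only or power-up locked.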
diff --git a/block/partitions/Makefile b/block/partitions/Makefile
index 2be4d7ba4e3..37a95270503 100644
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ACORN_PARTITION) += acorn.o
obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
obj-$(CONFIG_ATARI_PARTITION) += atari.o
obj-$(CONFIG_AIX_PARTITION) += aix.o
+obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
diff --git a/block/partitions/check.c b/block/partitions/check.c
index 19ba207ea7d..9ac1df74f69 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -34,6 +34,7 @@
#include "efi.h"
#include "karma.h"
#include "sysv68.h"
+#include "cmdline.h"
int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
@@ -65,6 +66,9 @@ static int (*check_part[])(struct parsed_partitions *) = {
adfspart_check_ADFS,
#endif
+#ifdef CONFIG_CMDLINE_PARTITION
+ cmdline_partition,
+#endif
#ifdef CONFIG_EFI_PARTITION
efi_partition, /* this must come before msdos */
#endif
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
new file mode 100644
index 00000000000..56cf4ffad51
--- /dev/null
+++ b/block/partitions/cmdline.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 HUAWEI
+ * Author: Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ * Read block device partition table from command line.
+ * Typically used for fixed-storage (eMMC) embedded devices.
+ * There is no MBR, which saves storage space, and the bootloader
+ * can access data on the block device by absolute address.
+ * Users can easily change the partition layout.
+ *
+ * The format for the command line is just like mtdparts.
+ *
+ * For detailed configuration, see "Documentation/block/cmdline-partition.txt".
+ *
+ */
+
+#include <linux/cmdline-parser.h>
+
+#include "check.h"
+#include "cmdline.h"
+
+static char *cmdline;
+static struct cmdline_parts *bdev_parts;
+
+static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
+{
+ int label_min;
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname) + 4];
+ struct parsed_partitions *state = (struct parsed_partitions *)param;
+
+ if (slot >= state->limit)
+ return 1;
+
+ put_partition(state, slot, subpart->from >> 9,
+ subpart->size >> 9);
+
+ info = &state->parts[slot].info;
+
+ label_min = min_t(int, sizeof(info->volname) - 1,
+ sizeof(subpart->name));
+ strncpy(info->volname, subpart->name, label_min);
+ info->volname[label_min] = '\0';
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+ state->parts[slot].has_info = true;
+
+ return 0;
+}
+
+static int __init cmdline_parts_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+__setup("blkdevparts=", cmdline_parts_setup);
+
+/*
+ * Purpose: allocate cmdline partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ * 0 if this isn't our partition table
+ * 1 if successful
+ */
+int cmdline_partition(struct parsed_partitions *state)
+{
+ sector_t disk_size;
+ char bdev[BDEVNAME_SIZE];
+ struct cmdline_parts *parts;
+
+ if (cmdline) {
+ if (bdev_parts)
+ cmdline_parts_free(&bdev_parts);
+
+ if (cmdline_parts_parse(&bdev_parts, cmdline)) {
+ cmdline = NULL;
+ return -1;
+ }
+ cmdline = NULL;
+ }
+
+ if (!bdev_parts)
+ return 0;
+
+ bdevname(state->bdev, bdev);
+ parts = cmdline_parts_find(bdev_parts, bdev);
+ if (!parts)
+ return 0;
+
+ disk_size = get_capacity(state->bdev->bd_disk) << 9;
+
+ cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ return 1;
+}
diff --git a/block/partitions/cmdline.h b/block/partitions/cmdline.h
new file mode 100644
index 00000000000..26e0f8da141
--- /dev/null
+++ b/block/partitions/cmdline.h
@@ -0,0 +1,2 @@
+
+int cmdline_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index c85fc895ecd..1a5ec9a03c0 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -25,6 +25,9 @@
* TODO:
*
* Changelog:
+ * Mon Aug 05 2013 Davidlohr Bueso <davidlohr@hp.com>
+ * - detect hybrid MBRs, tighter pMBR checking & cleanups.
+ *
* Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
* - test for valid PMBR and valid PGPT before ever reading
* AGPT, allow override with 'gpt' kernel command line option.
@@ -149,34 +152,80 @@ static u64 last_lba(struct block_device *bdev)
bdev_logical_block_size(bdev)) - 1ULL;
}
-static inline int
-pmbr_part_valid(struct partition *part)
+static inline int pmbr_part_valid(gpt_mbr_record *part)
{
- if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
- le32_to_cpu(part->start_sect) == 1UL)
- return 1;
- return 0;
+ if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT)
+ goto invalid;
+
+ /* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */
+ if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA)
+ goto invalid;
+
+ return GPT_MBR_PROTECTIVE;
+invalid:
+ return 0;
}
/**
* is_pmbr_valid(): test Protective MBR for validity
* @mbr: pointer to a legacy mbr structure
+ * @total_sectors: amount of sectors in the device
*
- * Description: Returns 1 if PMBR is valid, 0 otherwise.
- * Validity depends on two things:
+ * Description: Checks for a valid protective or hybrid
+ * master boot record (MBR). The validity of a pMBR depends
+ * on all of the following properties:
* 1) MSDOS signature is in the last two bytes of the MBR
* 2) One partition of type 0xEE is found
+ *
+ * In addition, a hybrid MBR will have up to three additional
+ * primary partitions, which point to the same space that's
+ * marked out by up to three GPT partitions.
+ *
+ * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or
+ * GPT_MBR_HYBRID depending on the device layout.
*/
-static int
-is_pmbr_valid(legacy_mbr *mbr)
+static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
{
- int i;
+ int i, part = 0, ret = 0; /* invalid by default */
+
if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
- return 0;
+ goto done;
+
+ for (i = 0; i < 4; i++) {
+ ret = pmbr_part_valid(&mbr->partition_record[i]);
+ if (ret == GPT_MBR_PROTECTIVE) {
+ part = i;
+ /*
+ * Ok, we at least know that there's a protective MBR,
+ * now check if there are other partition types for
+ * hybrid MBR.
+ */
+ goto check_hybrid;
+ }
+ }
+
+ if (ret != GPT_MBR_PROTECTIVE)
+ goto done;
+check_hybrid:
for (i = 0; i < 4; i++)
- if (pmbr_part_valid(&mbr->partition_record[i]))
- return 1;
- return 0;
+ if ((mbr->partition_record[i].os_type !=
+ EFI_PMBR_OSTYPE_EFI_GPT) &&
+ (mbr->partition_record[i].os_type != 0x00))
+ ret = GPT_MBR_HYBRID;
+
+ /*
+ * Protective MBRs take up the lesser of the whole disk
+ * or 2 TiB (32bit LBA), ignoring the rest of the disk.
+ *
+ * Hybrid MBRs do not necessarily comply with this.
+ */
+ if (ret == GPT_MBR_PROTECTIVE) {
+ if (le32_to_cpu(mbr->partition_record[part].size_in_lba) !=
+ min((uint32_t) total_sectors - 1, 0xFFFFFFFF))
+ ret = 0;
+ }
+done:
+ return ret;
}
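To spell out the new size check: a strictly protective MBR must record the lesser of the whole disk (minus the MBR sector itself) and the 32-bit LBA ceiling. A quick standalone check of that intended clamp (disk sizes chosen for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t expected_size_in_lba(uint64_t total_sectors)
{
	/* lesser of the whole disk minus the pMBR sector itself,
	 * or the 32-bit LBA limit */
	uint64_t want = total_sectors - 1;

	return want > 0xFFFFFFFFULL ? 0xFFFFFFFFU : (uint32_t)want;
}

int main(void)
{
	printf("0x%x\n", expected_size_in_lba(1ULL << 30)); /* 512 GiB disk */
	printf("0x%x\n", expected_size_in_lba(1ULL << 33)); /* 4 TiB: clamped */
	return 0;
}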
/**
@@ -243,8 +292,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
return NULL;
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
- (u8 *) pte,
- count) < count) {
+ (u8 *) pte, count) < count) {
kfree(pte);
pte=NULL;
return NULL;
@@ -364,7 +412,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
(unsigned long long)lastlba);
goto fail;
}
-
+ if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) {
+ pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+ (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba));
+ goto fail;
+ }
/* Check that sizeof_partition_entry has the correct value */
if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
pr_debug("GUID Partitition Entry Size check failed.\n");
@@ -429,44 +482,42 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
if (!pgpt || !agpt)
return;
if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header LBA != Alt. header alternate_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->my_lba),
(unsigned long long)le64_to_cpu(agpt->alternate_lba));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
- printk(KERN_WARNING
- "GPT:Primary header alternate_lba != Alt. header my_lba\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header alternate_lba != Alt. header my_lba\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)le64_to_cpu(agpt->my_lba));
error_found++;
}
if (le64_to_cpu(pgpt->first_usable_lba) !=
le64_to_cpu(agpt->first_usable_lba)) {
- printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:first_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
(unsigned long long)le64_to_cpu(agpt->first_usable_lba));
error_found++;
}
if (le64_to_cpu(pgpt->last_usable_lba) !=
le64_to_cpu(agpt->last_usable_lba)) {
- printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:last_usable_lbas don't match.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
(unsigned long long)le64_to_cpu(agpt->last_usable_lba));
error_found++;
}
if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
- printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+ pr_warn("GPT:disk_guids don't match.\n");
error_found++;
}
if (le32_to_cpu(pgpt->num_partition_entries) !=
le32_to_cpu(agpt->num_partition_entries)) {
- printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+ pr_warn("GPT:num_partition_entries don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->num_partition_entries),
le32_to_cpu(agpt->num_partition_entries));
@@ -474,8 +525,7 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
le32_to_cpu(agpt->sizeof_partition_entry)) {
- printk(KERN_WARNING
- "GPT:sizeof_partition_entry values don't match: "
+ pr_warn("GPT:sizeof_partition_entry values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->sizeof_partition_entry),
le32_to_cpu(agpt->sizeof_partition_entry));
@@ -483,34 +533,30 @@ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
}
if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
le32_to_cpu(agpt->partition_entry_array_crc32)) {
- printk(KERN_WARNING
- "GPT:partition_entry_array_crc32 values don't match: "
+ pr_warn("GPT:partition_entry_array_crc32 values don't match: "
"0x%x != 0x%x\n",
le32_to_cpu(pgpt->partition_entry_array_crc32),
le32_to_cpu(agpt->partition_entry_array_crc32));
error_found++;
}
if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(pgpt->alternate_lba),
(unsigned long long)lastlba);
error_found++;
}
if (le64_to_cpu(agpt->my_lba) != lastlba) {
- printk(KERN_WARNING
- "GPT:Alternate GPT header not at the end of the disk.\n");
- printk(KERN_WARNING "GPT:%lld != %lld\n",
+ pr_warn("GPT:Alternate GPT header not at the end of the disk.\n");
+ pr_warn("GPT:%lld != %lld\n",
(unsigned long long)le64_to_cpu(agpt->my_lba),
(unsigned long long)lastlba);
error_found++;
}
if (error_found)
- printk(KERN_WARNING
- "GPT: Use GNU Parted to correct GPT errors.\n");
+ pr_warn("GPT: Use GNU Parted to correct GPT errors.\n");
return;
}
@@ -536,6 +582,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
gpt_header *pgpt = NULL, *agpt = NULL;
gpt_entry *pptes = NULL, *aptes = NULL;
legacy_mbr *legacymbr;
+ sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
u64 lastlba;
if (!ptes)
@@ -543,17 +590,22 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
lastlba = last_lba(state->bdev);
if (!force_gpt) {
- /* This will be added to the EFI Spec. per Intel after v1.02. */
- legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
- if (legacymbr) {
- read_lba(state, 0, (u8 *) legacymbr,
- sizeof (*legacymbr));
- good_pmbr = is_pmbr_valid(legacymbr);
- kfree(legacymbr);
- }
- if (!good_pmbr)
- goto fail;
- }
+ /* This will be added to the EFI Spec. per Intel after v1.02. */
+ legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL);
+ if (!legacymbr)
+ goto fail;
+
+ read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr));
+ good_pmbr = is_pmbr_valid(legacymbr, total_sectors);
+ kfree(legacymbr);
+
+ if (!good_pmbr)
+ goto fail;
+
+ pr_debug("Device has a %s MBR\n",
+ good_pmbr == GPT_MBR_PROTECTIVE ?
+ "protective" : "hybrid");
+ }
good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
&pgpt, &pptes);
@@ -576,11 +628,8 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = pptes;
kfree(agpt);
kfree(aptes);
- if (!good_agpt) {
- printk(KERN_WARNING
- "Alternate GPT is invalid, "
- "using primary GPT.\n");
- }
+ if (!good_agpt)
+ pr_warn("Alternate GPT is invalid, using primary GPT.\n");
return 1;
}
else if (good_agpt) {
@@ -588,8 +637,7 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
*ptes = aptes;
kfree(pgpt);
kfree(pptes);
- printk(KERN_WARNING
- "Primary GPT is invalid, using alternate GPT.\n");
+ pr_warn("Primary GPT is invalid, using alternate GPT.\n");
return 1;
}
@@ -651,8 +699,7 @@ int efi_partition(struct parsed_partitions *state)
put_partition(state, i+1, start * ssz, size * ssz);
/* If this is a RAID volume, tell md */
- if (!efi_guidcmp(ptes[i].partition_type_guid,
- PARTITION_LINUX_RAID_GUID))
+ if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID))
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
info = &state->parts[i + 1].info;
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index b69ab729558..4efcafba7e6 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -37,6 +37,9 @@
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+#define GPT_MBR_PROTECTIVE 1
+#define GPT_MBR_HYBRID 2
+
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -101,11 +104,25 @@ typedef struct _gpt_entry {
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
} __attribute__ ((packed)) gpt_entry;
+typedef struct _gpt_mbr_record {
+ u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
+ u8 start_head; /* unused by EFI, pt start in CHS */
+ u8 start_sector; /* unused by EFI, pt start in CHS */
+ u8 start_track; /* unused by EFI, pt start in CHS */
+ u8 os_type; /* EFI and legacy non-EFI OS types */
+ u8 end_head; /* unused by EFI, pt end in CHS */
+ u8 end_sector; /* unused by EFI, pt end in CHS */
+ u8 end_track; /* unused by EFI, pt end in CHS */
+ __le32 starting_lba; /* used by EFI - start addr of the on disk pt */
+ __le32 size_in_lba; /* used by EFI - size of pt in LBA */
+} __packed gpt_mbr_record;
+
typedef struct _legacy_mbr {
u8 boot_code[440];
__le32 unique_mbr_signature;
__le16 unknown;
- struct partition partition_record[4];
+ gpt_mbr_record partition_record[4];
__le16 signature;
} __attribute__ ((packed)) legacy_mbr;
@@ -113,22 +130,3 @@ typedef struct _legacy_mbr {
extern int efi_partition(struct parsed_partitions *state);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * --------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1219ab7c310..1e16cbd61da 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -77,9 +77,36 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
return dmabuf->ops->mmap(dmabuf, vma);
}
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct dma_buf *dmabuf;
+ loff_t base;
+
+ if (!is_dma_buf_file(file))
+ return -EBADF;
+
+ dmabuf = file->private_data;
+
+ /* only support discovering the end of the buffer,
+ but also allow SEEK_SET to maintain the idiomatic
+ SEEK_END(0), SEEK_SET(0) pattern */
+ if (whence == SEEK_END)
+ base = dmabuf->size;
+ else if (whence == SEEK_SET)
+ base = 0;
+ else
+ return -EINVAL;
+
+ if (offset != 0)
+ return -EINVAL;
+
+ return base + offset;
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
+ .llseek = dma_buf_llseek,
};
/*
@@ -133,7 +160,12 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
dmabuf->exp_name = exp_name;
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+ if (IS_ERR(file)) {
+ kfree(dmabuf);
+ return ERR_CAST(file);
+ }
+ file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
mutex_init(&dmabuf->lock);
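From userspace the new handler supports exactly the size-discovery idiom the comment describes; a helper sketch (the fd is assumed to be a dma-buf file descriptor handed out by some exporting driver):

#include <unistd.h>
#include <sys/types.h>

/* Only lseek(fd, 0, SEEK_END) and lseek(fd, 0, SEEK_SET) succeed;
 * any other offset or whence yields -EINVAL. */
static off_t dmabuf_size(int fd)
{
	off_t size = lseek(fd, 0, SEEK_END);

	if (size < 0)
		return -1;
	if (lseek(fd, 0, SEEK_SET) < 0)	/* rewind for the caller */
		return -1;
	return size;
}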
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 025c41d3cb3..14a9d191231 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "83"
+#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -169,6 +169,7 @@ struct aoedev {
ulong ref;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
+ struct dentry *debugfs;
struct request_queue *blkq;
struct hd_geometry geo;
sector_t ssize;
@@ -206,6 +207,7 @@ struct ktstate {
int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
+void aoedisk_rm_debugfs(struct aoedev *d);
void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 916d9ed5c8a..dd73e1ff175 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
* aoeblk.c
* block device routines
@@ -17,11 +17,13 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+static struct dentry *aoe_debugfs_dir;
/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
@@ -108,6 +110,55 @@ static ssize_t aoedisk_show_payload(struct device *dev,
return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}
+static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+{
+ struct aoedev *d;
+ struct aoetgt **t, **te;
+ struct aoeif *ifp, *ife;
+ unsigned long flags;
+ char c;
+
+ d = s->private;
+ seq_printf(s, "rttavg: %d rttdev: %d\n",
+ d->rttavg >> RTTSCALE,
+ d->rttdev >> RTTDSCALE);
+ seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
+ seq_printf(s, "kicked: %ld\n", d->kicked);
+ seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
+ seq_printf(s, "ref: %ld\n", d->ref);
+
+ spin_lock_irqsave(&d->lock, flags);
+ t = d->targets;
+ te = t + d->ntargets;
+ for (; t < te && *t; t++) {
+ c = '\t';
+ seq_printf(s, "falloc: %ld\n", (*t)->falloc);
+ seq_printf(s, "ffree: %p\n",
+ list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
+ seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
+ (*t)->maxout, (*t)->nframes);
+ seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
+ seq_printf(s, "\ttaint:%d\n", (*t)->taint);
+ seq_printf(s, "\tr:%d\n", (*t)->rpkts);
+ seq_printf(s, "\tw:%d\n", (*t)->wpkts);
+ ifp = (*t)->ifs;
+ ife = ifp + ARRAY_SIZE((*t)->ifs);
+ for (; ifp->nd && ifp < ife; ifp++) {
+ seq_printf(s, "%c%s", c, ifp->nd->name);
+ c = ',';
+ }
+ seq_puts(s, "\n");
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ return 0;
+}
+
+static int aoe_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aoedisk_debugfs_show, inode->i_private);
+}
+
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
@@ -130,6 +181,44 @@ static const struct attribute_group attr_group = {
.attrs = aoe_attrs,
};
+static const struct file_operations aoe_debugfs_fops = {
+ .open = aoe_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void
+aoedisk_add_debugfs(struct aoedev *d)
+{
+ struct dentry *entry;
+ char *p;
+
+ if (aoe_debugfs_dir == NULL)
+ return;
+ p = strchr(d->gd->disk_name, '/');
+ if (p == NULL)
+ p = d->gd->disk_name;
+ else
+ p++;
+ BUG_ON(*p == '\0');
+ entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+ &aoe_debugfs_fops);
+ if (IS_ERR_OR_NULL(entry)) {
+ pr_info("aoe: cannot create debugfs file for %s\n",
+ d->gd->disk_name);
+ return;
+ }
+ BUG_ON(d->debugfs);
+ d->debugfs = entry;
+}
+void
+aoedisk_rm_debugfs(struct aoedev *d)
+{
+ debugfs_remove(d->debugfs);
+ d->debugfs = NULL;
+}
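With a standard debugfs mount, the per-device file created above should appear as /sys/kernel/debug/aoe/<disk name>; a throwaway reader (the path, including the e0.0 example disk, is an assumption):

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/kernel/debug/aoe/e0.0", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}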
+
static int
aoedisk_add_sysfs(struct aoedev *d)
{
@@ -330,6 +419,7 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+ aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
@@ -351,6 +441,8 @@ err:
void
aoeblk_exit(void)
{
+ debugfs_remove_recursive(aoe_debugfs_dir);
+ aoe_debugfs_dir = NULL;
kmem_cache_destroy(buf_pool_cache);
}
@@ -362,7 +454,11 @@ aoeblk_init(void)
0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
-
+ aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
+ if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
+ pr_info("aoe: cannot create debugfs directory\n");
+ aoe_debugfs_dir = NULL;
+ }
return 0;
}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4d45dba7fb8..d2515435e23 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -380,7 +380,6 @@ aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
struct buf *buf;
- struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
@@ -391,7 +390,6 @@ aoecmd_ata_rw(struct aoedev *d)
f = newframe(d);
if (f == NULL)
return 0;
- t = *d->tgt;
bcnt = d->maxbcnt;
if (bcnt == 0)
bcnt = DEFAULTBCNT;
@@ -485,7 +483,6 @@ resend(struct aoedev *d, struct frame *f)
struct sk_buff *skb;
struct sk_buff_head queue;
struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct aoetgt *t;
char buf[128];
u32 n;
@@ -500,7 +497,6 @@ resend(struct aoedev *d, struct frame *f)
return;
}
h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
if (!(f->flags & FFL_PROBE)) {
snprintf(buf, sizeof(buf),
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 784c92e038d..e774c50b684 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -12,6 +12,7 @@
#include <linux/bitmap.h>
#include <linux/kdev_t.h>
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include "aoe.h"
static void dummy_timer(ulong);
@@ -241,16 +242,12 @@ aoedev_downdev(struct aoedev *d)
static int
user_req(char *s, size_t slen, struct aoedev *d)
{
- char *p;
+ const char *p;
size_t lim;
if (!d->gd)
return 0;
- p = strrchr(d->gd->disk_name, '/');
- if (!p)
- p = d->gd->disk_name;
- else
- p += 1;
+ p = kbasename(d->gd->disk_name);
lim = sizeof(d->gd->disk_name);
lim -= p - d->gd->disk_name;
if (slen < lim)
@@ -278,6 +275,7 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
+ aoedisk_rm_debugfs(d);
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2cc80b..d2d95ff5353 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -4258,6 +4258,13 @@ static void cciss_find_board_params(ctlr_info_t *h)
h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
/*
+ * The P600 may exhibit poor performance under some workloads
+ * if we use the value in the configuration table. Limit this
+ * controller to MAXSGENTRIES (32) instead.
+ */
+ if (h->board_id == 0x3225103C)
+ h->maxsgentries = MAXSGENTRIES;
+ /*
* Limit in-command s/g elements to 32 to save dma'able memory.
* However the spec says if 0, use 31.
*/
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index a56cfcd5d64..77a60bedd7a 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -636,7 +636,7 @@ ok_to_write:
mg_request(host->breq);
}
-void mg_times_out(unsigned long data)
+static void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1bbc681688e..79aa179305b 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -598,7 +598,7 @@ static ssize_t class_osdblk_remove(struct class *c,
unsigned long ul;
struct list_head *tmp;
- rc = strict_strtoul(buf, 10, &ul);
+ rc = kstrtoul(buf, 10, &ul);
if (rc)
return rc;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5d0ea11d9f..56188475cfd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -44,6 +44,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -69,23 +71,24 @@
#define DRIVER_NAME "pktcdvd"
-#if PACKET_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
-
-#if PACKET_DEBUG > 1
-#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
-#else
-#define VPRINTK(fmt, args...)
-#endif
+#define pkt_err(pd, fmt, ...) \
+ pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...) \
+ pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...) \
+do { \
+ if (level == 2 && PACKET_DEBUG >= 2) \
+ pr_notice("%s: %s():" fmt, \
+ pd->name, __func__, ##__VA_ARGS__); \
+ else if (level == 1 && PACKET_DEBUG >= 1) \
+ pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
+} while (0)
#define MAX_SPEED 0xffff
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
- ~(sector_t)((pd)->settings.size - 1))
-
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
@@ -103,7 +106,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
-
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
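get_zone() simply rounds a sector down to the start of its packet zone with a power-of-two mask, matching the ZONE() macro it replaces. A standalone check of the arithmetic (values invented; settings.size must be a power of two for the mask to be valid):

#include <stdio.h>

int main(void)
{
	unsigned long long size = 128, offset = 0;	/* sectors */
	unsigned long long sector = 1000;
	unsigned long long zone = (sector + offset) & ~(size - 1);

	printf("sector %llu -> zone %llu\n", sector, zone);	/* 896 */
	return 0;
}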
/*
* create and register a pktcdvd kernel object.
@@ -424,7 +430,7 @@ static int pkt_sysfs_init(void)
if (ret) {
kfree(class_pktcdvd);
class_pktcdvd = NULL;
- printk(DRIVER_NAME": failed to create class pktcdvd\n");
+ pr_err("failed to create class pktcdvd\n");
return ret;
}
return 0;
@@ -517,7 +523,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
{
BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- VPRINTK(DRIVER_NAME": queue empty\n");
+ pkt_dbg(2, pd, "queue empty\n");
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
@@ -734,36 +740,33 @@ out:
return ret;
}
+static const char *sense_key_string(__u8 index)
+{
+ static const char * const info[] = {
+ "No sense", "Recovered error", "Not ready",
+ "Medium error", "Hardware error", "Illegal request",
+ "Unit attention", "Data protect", "Blank check",
+ };
+
+ return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
/*
* A generic sense dump / resolve mechanism should be implemented across
* all ATAPI + SCSI devices.
*/
-static void pkt_dump_sense(struct packet_command *cgc)
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+ struct packet_command *cgc)
{
- static char *info[9] = { "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check" };
- int i;
struct request_sense *sense = cgc->sense;
- printk(DRIVER_NAME":");
- for (i = 0; i < CDROM_PACKET_SIZE; i++)
- printk(" %02x", cgc->cmd[i]);
- printk(" - ");
-
- if (sense == NULL) {
- printk("no sense\n");
- return;
- }
-
- printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
-
- if (sense->sense_key > 8) {
- printk(" (INVALID)\n");
- return;
- }
-
- printk(" (%s)\n", info[sense->sense_key]);
+ if (sense)
+ pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ CDROM_PACKET_SIZE, cgc->cmd,
+ sense->sense_key, sense->asc, sense->ascq,
+ sense_key_string(sense->sense_key));
+ else
+ pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
@@ -806,7 +809,7 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
cgc.cmd[5] = write_speed & 0xff;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -872,7 +875,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": write, waiting\n");
+ pkt_dbg(2, pd, "write, waiting\n");
break;
}
pkt_flush_cache(pd);
@@ -881,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
} else {
if (!reads_queued && writes_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- VPRINTK(DRIVER_NAME": read, waiting\n");
+ pkt_dbg(2, pd, "read, waiting\n");
break;
}
pd->iosched.writing = 1;
@@ -943,7 +946,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
set_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else {
- printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
+ pkt_err(pd, "cdrom max_phys_segments too small\n");
return -EIO;
}
}
@@ -987,8 +990,9 @@ static void pkt_end_io_read(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
- (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+ pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, (unsigned long long)pkt->sector,
+ (unsigned long long)bio->bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1005,7 +1009,7 @@ static void pkt_end_io_packet_write(struct bio *bio, int err)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
@@ -1047,7 +1051,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
spin_unlock(&pkt->lock);
if (pkt->cache_valid) {
- VPRINTK("pkt_gather_data: zone %llx cached\n",
+ pkt_dbg(2, pd, "zone %llx cached\n",
(unsigned long long)pkt->sector);
goto out_account;
}
@@ -1070,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+ pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
f, pkt->pages[p], offset);
if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
BUG();
@@ -1082,7 +1086,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
}
out_account:
- VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "need %d frames for zone %llx\n",
frames_read, (unsigned long long)pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
@@ -1183,7 +1187,8 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state
"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
};
enum packet_data_state old_state = pkt->state;
- VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+ pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, (unsigned long long)pkt->sector,
state_name[old_state], state_name[state]);
#endif
pkt->state = state;
@@ -1202,12 +1207,10 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
struct rb_node *n;
int wakeup;
- VPRINTK("handle_queue\n");
-
atomic_set(&pd->scan_queue, 0);
if (list_empty(&pd->cdrw.pkt_free_list)) {
- VPRINTK("handle_queue: no pkt\n");
+ pkt_dbg(2, pd, "no pkt\n");
return 0;
}
@@ -1224,7 +1227,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = ZONE(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1244,7 +1247,7 @@ try_next_bio:
}
spin_unlock(&pd->lock);
if (!bio) {
- VPRINTK("handle_queue: no bio\n");
+ pkt_dbg(2, pd, "no bio\n");
return 0;
}
@@ -1260,12 +1263,12 @@ try_next_bio:
* to this packet.
*/
spin_lock(&pd->lock);
- VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+ pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- VPRINTK("pkt_handle_queue: found zone=%llx\n",
- (unsigned long long)ZONE(bio->bi_sector, pd));
- if (ZONE(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n",
+ (unsigned long long)get_zone(bio->bi_sector, pd));
+ if (get_zone(bio->bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
@@ -1316,7 +1319,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
}
- VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
+ pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
@@ -1327,7 +1330,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
- VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+ pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
pkt->write_size, (unsigned long long)pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
@@ -1359,7 +1362,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
{
int uptodate;
- VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+ pkt_dbg(2, pd, "pkt %d\n", pkt->id);
for (;;) {
switch (pkt->state) {
@@ -1398,7 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
if (pkt_start_recovery(pkt)) {
pkt_start_write(pd, pkt);
} else {
- VPRINTK("No recovery possible\n");
+ pkt_dbg(2, pd, "No recovery possible\n");
pkt_set_state(pkt, PACKET_FINISHED_STATE);
}
break;
@@ -1419,8 +1422,6 @@ static void pkt_handle_packets(struct pktcdvd_device *pd)
{
struct packet_data *pkt, *next;
- VPRINTK("pkt_handle_packets\n");
-
/*
* Run state machine for active packets
*/
@@ -1502,9 +1503,9 @@ static int kcdrwd(void *foobar)
if (PACKET_DEBUG > 1) {
int states[PACKET_NUM_STATES];
pkt_count_states(pd, states);
- VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3],
- states[4], states[5]);
+ pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2],
+ states[3], states[4], states[5]);
}
min_sleep_time = MAX_SCHEDULE_TIMEOUT;
@@ -1513,9 +1514,9 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- VPRINTK("kcdrwd: sleeping\n");
+ pkt_dbg(2, pd, "sleeping\n");
residue = schedule_timeout(min_sleep_time);
- VPRINTK("kcdrwd: wake up\n");
+ pkt_dbg(2, pd, "wake up\n");
/* make swsusp happy with our thread */
try_to_freeze();
@@ -1563,9 +1564,10 @@ work_to_do:
static void pkt_print_settings(struct pktcdvd_device *pd)
{
- printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
- printk("%u blocks, ", pd->settings.size >> 2);
- printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+ pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ pd->settings.fp ? "Fixed" : "Variable",
+ pd->settings.size >> 2,
+ pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
@@ -1699,7 +1701,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1714,7 +1716,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
cgc.sense = &sense;
if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1749,14 +1751,14 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
/*
* paranoia
*/
- printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
+ pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
return 1;
}
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
cgc.buflen = cgc.cmd[8] = size;
if ((ret = pkt_mode_select(pd, &cgc))) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -1793,7 +1795,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
if (ti->rt == 1 && ti->blank == 0)
return 1;
- printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 0;
}
@@ -1811,7 +1813,8 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x12: /* DVD-RAM */
return 1;
default:
- VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
+ pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+ pd->mmc3_profile);
return 0;
}
@@ -1820,22 +1823,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
* but i'm not sure, should we leave this to user apps? probably.
*/
if (di->disc_type == 0xff) {
- printk(DRIVER_NAME": Unknown disc. No track?\n");
+ pkt_notice(pd, "unknown disc - no track?\n");
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
- printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
+ pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
return 0;
}
if (di->erasable == 0) {
- printk(DRIVER_NAME": Disc not erasable\n");
+ pkt_notice(pd, "disc not erasable\n");
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
- printk(DRIVER_NAME": Can't write to last track (reserved)\n");
+ pkt_err(pd, "can't write to last track (reserved)\n");
return 0;
}
@@ -1860,7 +1863,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
memset(&ti, 0, sizeof(track_information));
if ((ret = pkt_get_disc_info(pd, &di))) {
- printk("failed get_disc\n");
+ pkt_err(pd, "failed get_disc\n");
return ret;
}
@@ -1871,12 +1874,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
- printk(DRIVER_NAME": failed get_track\n");
+ pkt_err(pd, "failed get_track\n");
return ret;
}
if (!pkt_writable_track(pd, &ti)) {
- printk(DRIVER_NAME": can't write to this track\n");
+ pkt_err(pd, "can't write to this track\n");
return -EROFS;
}
@@ -1886,11 +1889,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
*/
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
- printk(DRIVER_NAME": detected zero packet size!\n");
+ pkt_notice(pd, "detected zero packet size!\n");
return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
- printk(DRIVER_NAME": packet size is too big\n");
+ pkt_err(pd, "packet size is too big\n");
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -1932,7 +1935,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->settings.block_mode = PACKET_BLOCK_MODE2;
break;
default:
- printk(DRIVER_NAME": unknown data mode\n");
+ pkt_err(pd, "unknown data mode\n");
return -EROFS;
}
return 0;
@@ -1966,10 +1969,10 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
ret = pkt_mode_select(pd, &cgc);
if (ret) {
- printk(DRIVER_NAME": write caching control failed\n");
- pkt_dump_sense(&cgc);
+ pkt_err(pd, "write caching control failed\n");
+ pkt_dump_sense(pd, &cgc);
} else if (!ret && set)
- printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
+ pkt_notice(pd, "enabled write caching\n");
return ret;
}
@@ -2005,7 +2008,7 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
sizeof(struct mode_page_header);
ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
}
@@ -2064,7 +2067,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = 2;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
@@ -2079,16 +2082,16 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
cgc.cmd[8] = size;
ret = pkt_generic_packet(pd, &cgc);
if (ret) {
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
if (!(buf[6] & 0x40)) {
- printk(DRIVER_NAME": Disc type is not CD-RW\n");
+ pkt_notice(pd, "disc type is not CD-RW\n");
return 1;
}
if (!(buf[6] & 0x4)) {
- printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
+ pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
@@ -2108,14 +2111,14 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
*speed = us_clv_to_speed[sp];
break;
default:
- printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
+ pkt_notice(pd, "unknown disc sub-type %d\n", st);
return 1;
}
if (*speed) {
- printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
+ pkt_info(pd, "maximum media speed: %d\n", *speed);
return 0;
} else {
- printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
+ pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
return 1;
}
}
@@ -2126,7 +2129,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
struct request_sense sense;
int ret;
- VPRINTK(DRIVER_NAME": Performing OPC\n");
+ pkt_dbg(2, pd, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sense = &sense;
@@ -2134,7 +2137,7 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
cgc.cmd[0] = GPCMD_SEND_OPC;
cgc.cmd[1] = 1;
if ((ret = pkt_generic_packet(pd, &cgc)))
- pkt_dump_sense(&cgc);
+ pkt_dump_sense(pd, &cgc);
return ret;
}
@@ -2144,12 +2147,12 @@ static int pkt_open_write(struct pktcdvd_device *pd)
unsigned int write_speed, media_write_speed, read_speed;
if ((ret = pkt_probe_settings(pd))) {
- VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
+ pkt_dbg(2, pd, "failed probe\n");
return ret;
}
if ((ret = pkt_set_write_settings(pd))) {
- DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
+ pkt_dbg(1, pd, "failed saving write settings\n");
return -EIO;
}
@@ -2161,26 +2164,26 @@ static int pkt_open_write(struct pktcdvd_device *pd)
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
- DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
+ pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
break;
default:
if ((ret = pkt_media_speed(pd, &media_write_speed)))
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
- DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
+ pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
break;
}
read_speed = write_speed;
if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
- DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
+ pkt_dbg(1, pd, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
if ((ret = pkt_perform_opc(pd))) {
- DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
+ pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
}
return 0;
@@ -2205,7 +2208,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
goto out;
if ((ret = pkt_get_last_written(pd, &lba))) {
- printk(DRIVER_NAME": pkt_get_last_written failed\n");
+ pkt_err(pd, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2235,11 +2238,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- printk(DRIVER_NAME": not enough memory for buffers\n");
+ pkt_err(pd, "not enough memory for buffers\n");
ret = -ENOMEM;
goto out_putdev;
}
- printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
+ pkt_info(pd, "%lukB available on disc\n", lba << 1);
}
return 0;
@@ -2257,7 +2260,7 @@ out:
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
if (flush && pkt_flush_cache(pd))
- DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
+ pkt_dbg(1, pd, "not flushing cache\n");
pkt_lock_door(pd, 0);
@@ -2279,8 +2282,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
struct pktcdvd_device *pd = NULL;
int ret;
- VPRINTK(DRIVER_NAME": entering open\n");
-
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
@@ -2315,7 +2316,6 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
out_dec:
pd->refcnt--;
out:
- VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return ret;
@@ -2360,7 +2360,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n",
+ bdevname(bio->bi_bdev, b));
goto end_io;
}
@@ -2382,20 +2383,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
- pd->name, (unsigned long long)bio->bi_sector);
+ pkt_notice(pd, "WRITE for ro device (%llu)\n",
+ (unsigned long long)bio->bi_sector);
goto end_io;
}
if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
- printk(DRIVER_NAME": wrong bio size\n");
+ pkt_err(pd, "wrong bio size\n");
goto end_io;
}
blk_queue_bounce(q, &bio);
- zone = ZONE(bio->bi_sector, pd);
- VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+ zone = get_zone(bio->bi_sector, pd);
+ pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
(unsigned long long)bio_end_sector(bio));
@@ -2405,7 +2406,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
sector_t last_zone;
int first_sectors;
- last_zone = ZONE(bio_end_sector(bio) - 1, pd);
+ last_zone = get_zone(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
@@ -2500,7 +2501,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
- sector_t zone = ZONE(bmd->bi_sector, pd);
+ sector_t zone = get_zone(bmd->bi_sector, pd);
int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
int remaining = (pd->settings.size << 9) - used;
int remaining2;
@@ -2609,7 +2610,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct block_device *bdev;
if (pd->pkt_dev == dev) {
- printk(DRIVER_NAME": Recursive setup not allowed\n");
+ pkt_err(pd, "recursive setup not allowed\n");
return -EBUSY;
}
for (i = 0; i < MAX_WRITERS; i++) {
@@ -2617,11 +2618,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
+ pkt_err(pd, "%s already setup\n",
+ bdevname(pd2->bdev, b));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
- printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
+ pkt_err(pd, "can't chain pktcdvd devices\n");
return -EBUSY;
}
}
@@ -2644,13 +2646,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
- printk(DRIVER_NAME": can't start kernel thread\n");
+ pkt_err(pd, "can't start kernel thread\n");
ret = -ENOMEM;
goto out_mem;
}
proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
- DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+ pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
return 0;
out_mem:
@@ -2665,8 +2667,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
int ret;
- VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+ cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
@@ -2690,7 +2692,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
break;
default:
- VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
+ pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
@@ -2743,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!pkt_devs[idx])
break;
if (idx == MAX_WRITERS) {
- printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
+ pr_err("max %d writers supported\n", MAX_WRITERS);
ret = -EBUSY;
goto out_mutex;
}
@@ -2818,7 +2820,7 @@ out_mem:
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
- printk(DRIVER_NAME": setup of pktcdvd device failed\n");
+ pr_err("setup of pktcdvd device failed\n");
return ret;
}
@@ -2839,7 +2841,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
break;
}
if (idx == MAX_WRITERS) {
- DPRINTK(DRIVER_NAME": dev not setup\n");
+ pr_debug("dev not setup\n");
ret = -ENXIO;
goto out;
}
@@ -2859,7 +2861,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
- DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
+ pkt_dbg(1, pd, "writer unmapped\n");
del_gendisk(pd->disk);
blk_cleanup_queue(pd->disk->queue);
@@ -2969,7 +2971,7 @@ static int __init pkt_init(void)
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
- printk(DRIVER_NAME": Unable to register block device\n");
+ pr_err("unable to register block device\n");
goto out2;
}
if (!pktdev_major)
@@ -2983,7 +2985,7 @@ static int __init pkt_init(void)
ret = misc_register(&pkt_misc);
if (ret) {
- printk(DRIVER_NAME": Unable to register misc device\n");
+ pr_err("unable to register misc device\n");
goto out_misc;
}
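
The conversions above replace raw printk(DRIVER_NAME": ...") calls with per-device pkt_err()/pkt_notice()/pkt_info()/pkt_dbg() helpers that prefix the device name automatically, so call sites no longer pass pd->name by hand. The helpers themselves are defined elsewhere in the driver; a minimal sketch of what such wrappers could look like (assuming pr_*-based definitions keyed off pd->name; the real macros may differ):

	#define pkt_err(pd, fmt, ...) \
		pr_err("%s: " fmt, (pd)->name, ##__VA_ARGS__)
	#define pkt_notice(pd, fmt, ...) \
		pr_notice("%s: " fmt, (pd)->name, ##__VA_ARGS__)
	#define pkt_info(pd, fmt, ...) \
		pr_info("%s: " fmt, (pd)->name, ##__VA_ARGS__)
	/* pkt_dbg(level, ...) only logs when the debug level is high enough */
	#define pkt_dbg(level, pd, fmt, ...) \
	do { \
		if (PACKET_DEBUG >= (level)) \
			pr_notice("%s: " fmt, (pd)->name, ##__VA_ARGS__); \
	} while (0)
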
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 39c51cc7fab..b22a7d0fe5b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5132,7 +5132,7 @@ static ssize_t rbd_remove(struct bus_type *bus,
bool already = false;
int ret;
- ret = strict_strtoul(buf, 10, &ul);
+ ret = kstrtoul(buf, 10, &ul);
if (ret)
return ret;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8ed6ccb748c..b02d53a399f 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -924,7 +924,6 @@ static int swim_probe(struct platform_device *dev)
return 0;
out_kfree:
- platform_set_drvdata(dev, NULL);
kfree(swd);
out_iounmap:
iounmap(swim_base);
@@ -962,7 +961,6 @@ static int swim_remove(struct platform_device *dev)
if (res)
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(dev, NULL);
kfree(swd);
return 0;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index fe5c3cd10c3..c2014a0aa20 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -620,7 +620,7 @@ static void backend_changed(struct xenbus_watch *watch,
}
/* Front end dir is a number, which is used as the handle. */
- err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+ err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
if (err)
return;
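
Both this hunk and the rbd one above swap the long-deprecated strict_strtoul() for kstrtoul(), which has the same shape: it parses the whole string in the given base and returns 0 on success or a negative errno (-EINVAL on malformed input, -ERANGE on overflow). A minimal usage sketch (parse_handle is a hypothetical helper name):

	#include <linux/kernel.h>	/* kstrtoul() */

	static int parse_handle(const char *buf, unsigned long *handle)
	{
		/* base 0 auto-detects 0x/0 prefixes, matching the call above */
		return kstrtoul(buf, 0, handle);
	}
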
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 4519cb33298..5796d0157ce 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -766,6 +766,25 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int tpm_tis_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int ret;
+
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
@@ -787,26 +806,6 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
-static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev);
-}
-
-static int tpm_tis_pnp_resume(struct pnp_dev *dev)
-{
- struct tpm_chip *chip = pnp_get_drvdata(dev);
- int ret;
-
- if (chip->vendor.irq)
- tpm_tis_reenable_interrupts(chip);
-
- ret = tpm_pm_resume(&dev->dev);
- if (!ret)
- tpm_do_selftest(chip);
-
- return ret;
-}
-
static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
@@ -835,9 +834,12 @@ static struct pnp_driver tis_pnp_driver = {
.name = "tpm_tis",
.id_table = tpm_pnp_tbl,
.probe = tpm_tis_pnp_init,
- .suspend = tpm_tis_pnp_suspend,
- .resume = tpm_tis_pnp_resume,
.remove = tpm_tis_pnp_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &tpm_tis_pm,
+ },
+#endif
};
#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
@@ -846,20 +848,6 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
-#ifdef CONFIG_PM_SLEEP
-static int tpm_tis_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- if (chip->vendor.irq)
- tpm_tis_reenable_interrupts(chip);
-
- return tpm_pm_resume(dev);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
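
The net effect of the tpm_tis changes is that the PNP-specific suspend/resume callbacks are dropped and both the PNP and platform drivers share one dev_pm_ops built with SIMPLE_DEV_PM_OPS(), with the selftest re-run folded into the common tpm_tis_resume(). For reference, SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume) expands to roughly the following when CONFIG_PM_SLEEP is set (a sketch; see <linux/pm.h> for the exact definition):

	const struct dev_pm_ops tpm_tis_pm = {
		.suspend  = tpm_pm_suspend,
		.resume   = tpm_tis_resume,
		.freeze   = tpm_pm_suspend,
		.thaw     = tpm_tis_resume,
		.poweroff = tpm_pm_suspend,
		.restore  = tpm_tis_resume,
	};
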
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index daa4da281e5..526ec77c7ba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,6 +308,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config K3_DMA
+ tristate "Hisilicon K3 DMA support"
+ depends on ARCH_HI3xxx
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine for Hisilicon K3 platform
+ devices.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6d62ec30c4b..db89035b362 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82f732..e69b03c0fa5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct list_head resource_list;
struct resource_list_entry *rentry;
resource_size_t mem = 0, irq = 0;
- u32 vendor_id;
int ret;
if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
return 0;
- vendor_id = le32_to_cpu(grp->vendor_id);
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
- (char *)&vendor_id, grp->device_id, grp->revision);
+ (char *)&grp->vendor_id, grp->device_id, grp->revision);
/* Check if the request line range is available */
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
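
The acpi-dma hunk stops byte-swapping the vendor ID before printing it: the CSRT vendor ID is a four-character ASCII tag, so the bytes should be printed in storage order, and the intermediate le32_to_cpu() copy would scramble them on big-endian hosts. A sketch with a made-up tag:

	u32 vendor_id;

	memcpy(&vendor_id, "INTL", 4);	/* bytes as the firmware stores them */
	/* %.4s prints the four stored bytes: "INTL" on any endianness */
	pr_debug("vendor %.4s\n", (char *)&vendor_id);
	/* a le32_to_cpu() copy would instead print as "LTNI" on big-endian */
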
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bff41d4848e..fce46c5bf1c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
*
* Documentation: ARM DDI 0196G == PL080
* Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
*
* PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
* channel.
@@ -36,6 +37,14 @@
*
* The PL080 has a dual bus master, PL081 has a single master.
*
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in the following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
* Memory to peripheral transfer may be visualized as
* Get data from memory to DMAC
* Until no data left
@@ -64,10 +73,7 @@
* - Peripheral flow control: the transfer size is ignored (and should be
* zero). The data is transferred from the current LLI entry, until
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
- * will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ * will then move to the next LLI entry. Unsupported by PL080S.
*/
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
* @nomadik: whether the channels have Nomadik security extension bits
* that need to be checked for permission before use and some registers are
* missing
+ * @pl080s: whether this version is a PL080S, which has a separate register
+ * and LLI word for the transfer size.
*/
struct vendor_data {
+ u8 config_offset;
u8 channels;
bool dualmaster;
bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl. Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
- u32 src;
- u32 dst;
- u32 lli;
- u32 cctl;
+ bool pl080s;
+ u32 max_transfer_size;
};
/**
@@ -147,6 +145,7 @@ struct pl08x_bus_data {
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
+ void __iomem *reg_config;
spinlock_t lock;
struct pl08x_dma_chan *serving;
bool locked;
@@ -176,12 +175,13 @@ struct pl08x_sg {
* @ccfg: config reg values for current txd
* @done: this marks completed descriptors, which should not have their
* mux released.
+ * @cyclic: indicate cyclic transfers
*/
struct pl08x_txd {
struct virt_dma_desc vd;
struct list_head dsg_list;
dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
+ u32 *llis_va;
/* Default cctl value for LLIs */
u32 cctl;
/*
@@ -190,6 +190,7 @@ struct pl08x_txd {
*/
u32 ccfg;
bool done;
+ bool cyclic;
};
/**
@@ -265,17 +266,29 @@ struct pl08x_driver_data {
struct dma_pool *pool;
u8 lli_buses;
u8 mem_buses;
+ u8 lli_words;
};
/*
* PL08X specific defines
*/
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE 0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC 0
+#define PL080_LLI_DST 1
+#define PL080_LLI_LLI 2
+#define PL080_LLI_CCTL 3
+#define PL080S_LLI_CCTL2 4
+
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS 4
+#define PL080S_LLI_WORDS 8
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS 512
#define PL08X_ALIGN 8
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -336,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
unsigned int val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
return val & PL080_CONFIG_ACTIVE;
}
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+ if (pl08x->vd->pl080s)
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+ lli[PL080S_LLI_CCTL2], ccfg);
+ else
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+ writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+ writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+ writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+ writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+ if (pl08x->vd->pl080s)
+ writel_relaxed(lli[PL080S_LLI_CCTL2],
+ phychan->base + PL080S_CH_CONTROL2);
+
+ writel(ccfg, phychan->reg_config);
+}
+
/*
* Set the initial DMA register values i.e. those for the first LLI
* The next LLI pointer and the configuration interrupt bit have
@@ -352,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
struct pl08x_phy_chan *phychan = plchan->phychan;
struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
- struct pl08x_lli *lli;
u32 val;
list_del(&txd->vd.node);
@@ -363,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
- lli = &txd->llis_va[0];
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
- "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
- phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
- txd->ccfg);
-
- writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
- writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
- writel(lli->lli, phychan->base + PL080_CH_LLI);
- writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
- writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+ pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
@@ -383,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
cpu_relax();
/* Do not access config register until channel shows as inactive */
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
- writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
/*
@@ -406,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val |= PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
/* Wait for channel inactive */
for (timeout = 1000; timeout; timeout--) {
@@ -425,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
u32 val;
/* Clear the HALT bit */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val &= ~PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
}
/*
@@ -439,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->base + PL080_CH_CONFIG);
+ u32 val = readl(ch->reg_config);
val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
PL080_CONFIG_TC_IRQ_MASK);
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -455,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The source width defines the number of bytes */
u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
case PL080_WIDTH_8BIT:
break;
@@ -471,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ const u32 *llis_va, *llis_va_limit;
struct pl08x_phy_chan *ch;
+ dma_addr_t llis_bus;
struct pl08x_txd *txd;
- size_t bytes = 0;
+ u32 llis_max_words;
+ size_t bytes;
+ u32 clli;
ch = plchan->phychan;
txd = plchan->at;
+ if (!ch || !txd)
+ return 0;
+
/*
* Follow the LLIs to get the number of remaining
* bytes in the currently active transaction.
*/
- if (ch && txd) {
- u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+ clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the remaining bytes in the active transfer */
+ /* First get the remaining bytes in the active transfer */
+ if (pl08x->vd->pl080s)
+ bytes = get_bytes_in_cctl_pl080s(
+ readl(ch->base + PL080_CH_CONTROL),
+ readl(ch->base + PL080S_CH_CONTROL2));
+ else
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
- if (clli) {
- struct pl08x_lli *llis_va = txd->llis_va;
- dma_addr_t llis_bus = txd->llis_bus;
- int index;
+ if (!clli)
+ return bytes;
- BUG_ON(clli < llis_bus || clli >= llis_bus +
- sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+ llis_va = txd->llis_va;
+ llis_bus = txd->llis_bus;
- /*
- * Locate the next LLI - as this is an array,
- * it's simple maths to find.
- */
- index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+ llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(u32) * llis_max_words);
- for (; index < MAX_NUM_TSFR_LLIS; index++) {
- bytes += get_bytes_in_cctl(llis_va[index].cctl);
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ llis_va += (clli - llis_bus) / sizeof(u32);
- /*
- * A LLI pointer of 0 terminates the LLI list
- */
- if (!llis_va[index].lli)
- break;
- }
- }
+ llis_va_limit = llis_va + llis_max_words;
+
+ for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+ if (pl08x->vd->pl080s)
+ bytes += get_bytes_in_cctl_pl080s(
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ else
+ bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+ /*
+ * An LLI pointer going backward terminates the LLI list
+ */
+ if (llis_va[PL080_LLI_LLI] <= clli)
+ break;
}
return bytes;
@@ -722,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
break;
}
+ tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
return retbits;
}
@@ -766,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
/*
* Fills in one LLI for a certain transfer descriptor and advance the counter
*/
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
- int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl, u32 cctl2)
{
- struct pl08x_lli *llis_va = bd->txd->llis_va;
+ u32 offset = num_llis * pl08x->lli_words;
+ u32 *llis_va = bd->txd->llis_va + offset;
dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = bd->srcbus.addr;
- llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
- sizeof(struct pl08x_lli);
- llis_va[num_llis].lli |= bd->lli_bus;
+ /* Advance the offset to next LLI. */
+ offset += pl08x->lli_words;
+
+ llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+ llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+ llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+ llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+ llis_va[PL080_LLI_CCTL] = cctl;
+ if (pl08x->vd->pl080s)
+ llis_va[PL080S_LLI_CCTL2] = cctl2;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -791,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
- u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+ int num_llis, size_t *total_bytes)
{
*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
- pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
(*total_bytes) += len;
}
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis)
+{
+ int i;
+
+ if (pl08x->vd->pl080s) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ llis_va += pl08x->lli_words;
+ }
+ } else {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL]);
+ llis_va += pl08x->lli_words;
+ }
+ }
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis) {}
+#endif
+
/*
* This fills in the table of LLIs for the transfer descriptor
* Note that we assume we never have to change the burst sizes
@@ -812,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
int num_llis = 0;
u32 cctl, early_bytes = 0;
size_t max_bytes_per_lli, total_bytes;
- struct pl08x_lli *llis_va;
+ u32 *llis_va, *last_lli;
struct pl08x_sg *dsg;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -902,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, 0);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ 0, cctl, 0);
break;
}
@@ -924,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s byte width LLIs (remain 0x%08x)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
- &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+ num_llis++, &total_bytes);
}
if (bd.remainder) {
@@ -946,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* MIN(buswidths)
*/
max_bytes_per_lli = bd.srcbus.buswidth *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ pl08x->vd->max_transfer_size;
dev_vdbg(&pl08x->adev->dev,
"%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
@@ -981,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
- pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ lli_len, cctl, tsize);
total_bytes += lli_len;
}
@@ -993,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, send odd bytes (remain %zu)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, bd.remainder,
- num_llis++, &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl,
+ bd.remainder, num_llis++, &total_bytes);
}
}
@@ -1008,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (num_llis >= MAX_NUM_TSFR_LLIS) {
dev_err(&pl08x->adev->dev,
"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
+ __func__, MAX_NUM_TSFR_LLIS);
return 0;
}
}
llis_va = txd->llis_va;
- /* The final LLI terminates the LLI. */
- llis_va[num_llis - 1].lli = 0;
- /* The final LLI element shall also fire an interrupt. */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-
-#ifdef VERBOSE_DEBUG
- {
- int i;
+ last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
- dev_vdbg(&pl08x->adev->dev,
- "%-3s %-9s %-10s %-10s %-10s %s\n",
- "lli", "", "csrc", "cdst", "clli", "cctl");
- for (i = 0; i < num_llis; i++) {
- dev_vdbg(&pl08x->adev->dev,
- "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, &llis_va[i], llis_va[i].src,
- llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
- );
- }
+ if (txd->cyclic) {
+ /* Link back to the first LLI. */
+ last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+ } else {
+ /* The final LLI terminates the LLI. */
+ last_lli[PL080_LLI_LLI] = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
}
-#endif
+
+ pl08x_dump_lli(pl08x, llis_va, num_llis);
return num_llis;
}
@@ -1310,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
if (!plchan->slave)
return -EINVAL;
@@ -1319,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (config->device_fc && pl08x->vd->pl080s) {
+ dev_err(&pl08x->adev->dev,
+ "%s: PL080S does not support peripheral flow control\n",
+ __func__);
+ return -EINVAL;
+ }
+
plchan->cfg = *config;
return 0;
@@ -1409,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ dma_addr_t *slave_addr)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- struct pl08x_sg *dsg;
- struct scatterlist *sg;
enum dma_slave_buswidth addr_width;
- dma_addr_t slave_addr;
int ret, tmp;
u8 src_buses, dst_buses;
u32 maxburst, cctl;
- dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sg_dma_len(sgl), plchan->name);
-
txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1441,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
if (direction == DMA_MEM_TO_DEV) {
cctl = PL080_CONTROL_SRC_INCR;
- slave_addr = plchan->cfg.dst_addr;
+ *slave_addr = plchan->cfg.dst_addr;
addr_width = plchan->cfg.dst_addr_width;
maxburst = plchan->cfg.dst_maxburst;
src_buses = pl08x->mem_buses;
dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
cctl = PL080_CONTROL_DST_INCR;
- slave_addr = plchan->cfg.src_addr;
+ *slave_addr = plchan->cfg.src_addr;
addr_width = plchan->cfg.src_addr_width;
maxburst = plchan->cfg.src_maxburst;
src_buses = plchan->cd->periph_buses;
@@ -1497,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
else
txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+ return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+ enum dma_transfer_direction direction,
+ dma_addr_t slave_addr,
+ dma_addr_t buf_addr,
+ unsigned int len)
+{
+ struct pl08x_sg *dsg;
+
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg)
+ return -ENOMEM;
+
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = len;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = buf_addr;
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = buf_addr;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ struct scatterlist *sg;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sg_dma_len(sgl), plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
- if (!dsg) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ sg_dma_address(sg),
+ sg_dma_len(sg));
+ if (ret) {
pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
return NULL;
}
- list_add_tail(&dsg->node, &txd->dsg_list);
+ }
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else {
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev,
+ "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ __func__, period_len, buf_len,
+ direction == DMA_MEM_TO_DEV ? "to" : "from",
+ plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = true;
+ txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+ for (tmp = 0; tmp < buf_len; tmp += period_len) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ buf_addr + tmp, period_len);
+ if (ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
}
}
@@ -1657,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
spin_lock(&plchan->vc.lock);
tx = plchan->at;
- if (tx) {
+ if (tx && tx->cyclic) {
+ vchan_cyclic_callback(&tx->vd);
+ } else if (tx) {
plchan->at = NULL;
/*
* This descriptor is done, release its mux
@@ -1851,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
+ u32 tsfr_size;
int ret = 0;
int i;
@@ -1878,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1885,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
pl08x->slave.device_control = pl08x_control;
/* Get the platform data */
@@ -1907,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->mem_buses = pl08x->pd->mem_buses;
}
+ if (vd->pl080s)
+ pl08x->lli_words = PL080S_LLI_WORDS;
+ else
+ pl08x->lli_words = PL080_LLI_WORDS;
+ tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
- PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ tsfr_size, PL08X_ALIGN, 0);
if (!pl08x->pool) {
ret = -ENOMEM;
goto out_no_lli_pool;
@@ -1952,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
+ ch->reg_config = ch->base + vd->config_offset;
spin_lock_init(&ch->lock);
/*
@@ -1962,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (vd->nomadik) {
u32 val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
ch->locked = true;
@@ -2013,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
- amba_part(adev), amba_rev(adev),
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
return 0;
@@ -2043,22 +2249,41 @@ out_no_pl08x:
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_nomadik = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
.nomadik = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+ .config_offset = PL080S_CH_CONFIG,
+ .channels = 8,
+ .pl080s = true,
+ .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_pl081 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 2,
.dualmaster = false,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
+ /* Samsung PL080S variant */
+ {
+ .id = 0x0a141080,
+ .mask = 0xffffffff,
+ .data = &vendor_pl080s,
+ },
/* PL080 */
{
.id = 0x00041080,
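
A note on the pl08x restructuring above: LLI buffers are now flat u32 arrays rather than struct pl08x_lli[], with pl08x->lli_words (PL080_LLI_WORDS = 4, or PL080S_LLI_WORDS = 8) words per entry, and the PL080_LLI_*/PL080S_LLI_* constants indexing words within one entry. A hypothetical helper showing the indexing convention the patch uses inline:

	static u32 *pl08x_lli(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd, int i)
	{
		/* entry i starts i * lli_words u32s into the buffer */
		return txd->llis_va + i * pl08x->lli_words;
	}

	/* e.g. pl08x_lli(pl08x, txd, i)[PL080_LLI_CCTL] replaces the old
	   llis_va[i].cctl */
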
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index eee16b01fa8..9162ac80c18 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -509,7 +509,33 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_get_slave_channel - try to get a specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ int err = -EBUSY;
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ err = dma_chan_get(chan);
+ if (err)
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
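
dma_get_slave_channel() gives subsystems a way to claim one specific channel (e.g. one named in a firmware binding) instead of filtering over all channels with __dma_request_channel(); it succeeds only if no other client holds the channel. A hypothetical caller (my_lookup_chan() is a made-up platform-specific lookup):

	struct dma_chan *candidate = my_lookup_chan(dev);	/* made up */
	struct dma_chan *chan = NULL;

	if (candidate)
		chan = dma_get_slave_channel(candidate);
	if (!chan)
		return ERR_PTR(-EBUSY);	/* busy, or dma_chan_get() failed */
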
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c1217..89eb89f2228 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
* which does not support descriptor writeback.
*/
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+ return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_dma_slave *dws = dwc->chan.private;
unsigned char mmax = dw->nr_masters - 1;
- if (dwc->request_line == ~0) {
- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
- }
+ if (!is_request_line_unset(dwc))
+ return;
+
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
struct dw_dma *dw = dev_id;
- u32 status;
+ u32 status = dma_readl(dw, STATUS_INT);
+
+ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
- dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
- dma_readl(dw, STATUS_INT));
+ /* Check if we have any interrupt from the DMAC */
+ if (!status)
+ return IRQ_NONE;
/*
* Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
dwc->direction = sconfig->direction;
/* Take the request line from slave_id member */
- if (dwc->request_line == ~0)
+ if (is_request_line_unset(dwc))
dwc->request_line = sconfig->slave_id;
convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ if (ret == DMA_SUCCESS)
+ return ret;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
dma_set_residue(txstate, dwc_get_residue(dwc));
- if (dwc->paused)
+ if (dwc->paused && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
- "dw_dmac", dw);
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+ IRQF_SHARED, "dw_dmac", dw);
if (err)
return err;
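
Requesting the interrupt with IRQF_SHARED obliges the handler to check whether its device actually raised the line and to return IRQ_NONE otherwise, which is what the reworked dw_dma_interrupt() above now does by reading STATUS_INT up front. The generic shape of the pattern (a sketch with made-up register and struct names):

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct my_dev *md = dev_id;			/* hypothetical */
		u32 status = readl(md->regs + MY_STATUS);	/* hypothetical */

		if (!status)
			return IRQ_NONE;	/* another sharer raised the line */

		/* ... acknowledge and handle the events in status ... */
		return IRQ_HANDLED;
	}
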
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449cffae..e35d9759031 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532436e..ff50ff4c6a5 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
struct list_head node;
int absync;
int pset_nr;
+ int processed;
struct edmacc_param pset[0];
};
@@ -69,6 +70,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
+ int missed;
struct dma_slave_config cfg;
};
@@ -104,22 +106,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
- struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+ struct virt_dma_desc *vdesc;
struct edma_desc *edesc;
- int i;
-
- if (!vdesc) {
- echan->edesc = NULL;
- return;
+ struct device *dev = echan->vchan.chan.device->dev;
+ int i, j, left, nslots;
+
+ /* If we have either processed all psets or not yet started */
+ if (!echan->edesc ||
+ echan->edesc->pset_nr == echan->edesc->processed) {
+ /* Get next vdesc */
+ vdesc = vchan_next_desc(&echan->vchan);
+ if (!vdesc) {
+ echan->edesc = NULL;
+ return;
+ }
+ list_del(&vdesc->node);
+ echan->edesc = to_edma_desc(&vdesc->tx);
}
- list_del(&vdesc->node);
+ edesc = echan->edesc;
- echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+ /* Find out how many left */
+ left = edesc->pset_nr - edesc->processed;
+ nslots = min(MAX_NR_SG, left);
/* Write descriptor PaRAM set(s) */
- for (i = 0; i < edesc->pset_nr; i++) {
- edma_write_slot(echan->slot[i], &edesc->pset[i]);
+ for (i = 0; i < nslots; i++) {
+ j = i + edesc->processed;
+ edma_write_slot(echan->slot[i], &edesc->pset[j]);
dev_dbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
@@ -132,24 +146,50 @@ static void edma_execute(struct edma_chan *echan)
" bidx\t%08x\n"
" cidx\t%08x\n"
" lkrld\t%08x\n",
- i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ j, echan->ch_num, echan->slot[i],
+ edesc->pset[j].opt,
+ edesc->pset[j].src,
+ edesc->pset[j].dst,
+ edesc->pset[j].a_b_cnt,
+ edesc->pset[j].ccnt,
+ edesc->pset[j].src_dst_bidx,
+ edesc->pset[j].src_dst_cidx,
+ edesc->pset[j].link_bcntrld);
/* Link to the previous slot if not the last set */
- if (i != (edesc->pset_nr - 1))
+ if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
- /* Final pset links to the dummy pset */
- else
- edma_link(echan->slot[i], echan->ecc->dummy_slot);
}
- edma_start(echan->ch_num);
+ edesc->processed += nslots;
+
+ /*
+ * If this is the last set in a series of SG-list transactions,
+ * then set up a link to the dummy slot; this results in all future
+ * events being absorbed, which is OK because we're done
+ */
+ if (edesc->processed == edesc->pset_nr)
+ edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+ edma_resume(echan->ch_num);
+
+ if (edesc->processed <= MAX_NR_SG) {
+ dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ edma_start(echan->ch_num);
+ }
+
+ /*
+ * This happens due to setup times between intermediate transfers
+ * in long SG lists, which have to be broken up into transfers of
+ * at most MAX_NR_SG sets
+ */
+ if (echan->missed) {
+ dev_dbg(dev, "missed event in execute detected\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ echan->missed = 0;
+ }
}
static int edma_terminate_all(struct edma_chan *echan)
@@ -222,9 +262,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int i;
int acnt, bcnt, ccnt, src, dst, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int i, nslots;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
@@ -247,12 +287,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
- if (sg_len > MAX_NR_SG) {
- dev_err(dev, "Exceeded max SG segments %d > %d\n",
- sg_len, MAX_NR_SG);
- return NULL;
- }
-
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
@@ -262,8 +296,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->pset_nr = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- /* Allocate a PaRAM slot, if needed */
+ /* Allocate a PaRAM slot, if needed */
+ nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+ for (i = 0; i < nslots; i++) {
if (echan->slot[i] < 0) {
echan->slot[i] =
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
@@ -273,6 +309,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
}
+ }
+
+ /* Configure PaRAM sets for each SG */
+ for_each_sg(sgl, sg, sg_len, i) {
acnt = dev_width;
@@ -330,6 +370,12 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure A or AB synchronized transfers */
if (edesc->absync)
edesc->pset[i].opt |= SYNCDIM;
+
+ /* If this is the last in the current SG set of transactions,
+ enable interrupts so that the next set is processed */
+ if (!((i+1) % MAX_NR_SG))
+ edesc->pset[i].opt |= TCINTEN;
+
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
@@ -355,27 +401,65 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
unsigned long flags;
+ struct edmacc_param p;
- /* Stop the channel */
- edma_stop(echan->ch_num);
+ /* Pause the channel */
+ edma_pause(echan->ch_num);
switch (ch_status) {
case DMA_COMPLETE:
- dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
if (edesc) {
+ if (edesc->processed == edesc->pset_nr) {
+ dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edma_stop(echan->ch_num);
+ vchan_cookie_complete(&edesc->vdesc);
+ } else {
+ dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ }
+
edma_execute(echan);
- vchan_cookie_complete(&edesc->vdesc);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
case DMA_CC_ERROR:
- dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+
+ /*
+ * Issue later based on the missed flag, which is sure
+ * to happen because either:
+ * (1) we finished transmitting an intermediate slot and
+ * edma_execute is coming up, or
+ * (2) we finished the current transfer and issue will
+ * call edma_execute.
+ *
+ * Important note: issuing can be dangerous here and
+ * lead to some nasty recursion when we are in a NULL
+ * slot. So we avoid doing so and set the missed flag.
+ */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
+ echan->missed = 1;
+ } else {
+ /*
+ * The slot is already programmed but the event got
+ * missed, so it's safe to issue it here.
+ */
+ dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ }
+
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
break;
default:
break;
@@ -502,8 +586,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
struct edma_desc *edesc = echan->edesc;
txstate->residue = edma_desc_size(edesc);
- } else {
- txstate->residue = 0;
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
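
The edma changes above lift the old sg_len <= MAX_NR_SG limit by pumping long SG lists through the same small window of PaRAM slots in batches, tracked by the new edesc->processed counter. The batching arithmetic in edma_execute(), shown in isolation:

	/* sketch of one edma_execute() pass over a long SG list */
	int left = edesc->pset_nr - edesc->processed;	/* psets still queued */
	int nslots = min(MAX_NR_SG, left);		/* size of this batch */

	/* slots 0..nslots-1 are reprogrammed from pset[processed..] */
	edesc->processed += nslots;

	/* a TC interrupt fires at every MAX_NR_SG boundary (see the
	 * TCINTEN hunk in edma_prep_slave_sg), re-entering edma_execute()
	 * for the next batch until processed == pset_nr */
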
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f2bf8c0c467..591cd8c63ab 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *state)
{
- struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&edmac->lock, flags);
- ret = dma_cookie_status(chan, cookie, state);
- spin_unlock_irqrestore(&edmac->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, state);
}
/**
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 49e8fbdb898..b3f3e90054f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct fsldma_chan *chan = to_fsl_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ff2aab973b4..78f8ca5fcce 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -805,10 +805,8 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
}
INIT_LIST_HEAD(&imxdmac->ld_free);
- if (imxdmac->sg_list) {
- kfree(imxdmac->sg_list);
- imxdmac->sg_list = NULL;
- }
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1e44b8cf95d..fc43603cf0b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -243,7 +243,6 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
- * @done channel completion
* @num_bd max NUM_BD. number of descriptors currently handling
*/
struct sdma_channel {
@@ -255,7 +254,6 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
- struct completion done;
unsigned int num_bd;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
@@ -307,9 +305,10 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
-enum sdma_devtype {
- IMX31_SDMA, /* runs on i.mx31 */
- IMX35_SDMA, /* runs on i.mx35 and later */
+struct sdma_driver_data {
+ int chnenbl0;
+ int num_events;
+ struct sdma_script_start_addrs *script_addrs;
};
struct sdma_engine {
@@ -318,8 +317,6 @@ struct sdma_engine {
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- enum sdma_devtype devtype;
- unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
@@ -327,15 +324,118 @@ struct sdma_engine {
struct clk *clk_ahb;
spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
+ const struct sdma_driver_data *drvdata;
+};
+
+static struct sdma_driver_data sdma_imx31 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX31,
+ .num_events = 32,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx25 = {
+ .ap_2_ap_addr = 729,
+ .uart_2_mcu_addr = 904,
+ .per_2_app_addr = 1255,
+ .mcu_2_app_addr = 834,
+ .uartsh_2_mcu_addr = 1120,
+ .per_2_shp_addr = 1329,
+ .mcu_2_shp_addr = 1048,
+ .ata_2_mcu_addr = 1560,
+ .mcu_2_ata_addr = 1479,
+ .app_2_per_addr = 1189,
+ .app_2_mcu_addr = 770,
+ .shp_2_per_addr = 1407,
+ .shp_2_mcu_addr = 979,
+};
+
+static struct sdma_driver_data sdma_imx25 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx25,
+};
+
+static struct sdma_driver_data sdma_imx35 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx51 = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .mcu_2_shp_addr = 961,
+ .ata_2_mcu_addr = 1473,
+ .mcu_2_ata_addr = 1392,
+ .app_2_per_addr = 1033,
+ .app_2_mcu_addr = 683,
+ .shp_2_per_addr = 1251,
+ .shp_2_mcu_addr = 892,
+};
+
+static struct sdma_driver_data sdma_imx51 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx51,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx53 = {
+ .ap_2_ap_addr = 642,
+ .app_2_mcu_addr = 683,
+ .mcu_2_app_addr = 747,
+ .uart_2_mcu_addr = 817,
+ .shp_2_mcu_addr = 891,
+ .mcu_2_shp_addr = 960,
+ .uartsh_2_mcu_addr = 1032,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+ .firi_2_mcu_addr = 1193,
+ .mcu_2_firi_addr = 1290,
+};
+
+static struct sdma_driver_data sdma_imx53 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx53,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx6q = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .per_2_per_addr = 6331,
+ .uartsh_2_mcu_addr = 1032,
+ .mcu_2_shp_addr = 960,
+ .app_2_mcu_addr = 683,
+ .shp_2_mcu_addr = 891,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6q = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6q,
};
static struct platform_device_id sdma_devtypes[] = {
{
+ .name = "imx25-sdma",
+ .driver_data = (unsigned long)&sdma_imx25,
+ }, {
.name = "imx31-sdma",
- .driver_data = IMX31_SDMA,
+ .driver_data = (unsigned long)&sdma_imx31,
}, {
.name = "imx35-sdma",
- .driver_data = IMX35_SDMA,
+ .driver_data = (unsigned long)&sdma_imx35,
+ }, {
+ .name = "imx51-sdma",
+ .driver_data = (unsigned long)&sdma_imx51,
+ }, {
+ .name = "imx53-sdma",
+ .driver_data = (unsigned long)&sdma_imx53,
+ }, {
+ .name = "imx6q-sdma",
+ .driver_data = (unsigned long)&sdma_imx6q,
}, {
/* sentinel */
}
@@ -343,8 +443,11 @@ static struct platform_device_id sdma_devtypes[] = {
MODULE_DEVICE_TABLE(platform, sdma_devtypes);
static const struct of_device_id sdma_dt_ids[] = {
- { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
- { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
+ { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
+ { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -356,8 +459,7 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
- SDMA_CHNENBL0_IMX35);
+ u32 chnenbl0 = sdma->drvdata->chnenbl0;
return chnenbl0 + event * 4;
}
@@ -547,8 +649,6 @@ static void sdma_tasklet(unsigned long data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
- complete(&sdmac->done);
-
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
else
@@ -733,7 +833,7 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
sdmac->per_addr = 0;
if (sdmac->event_id0) {
- if (sdmac->event_id0 >= sdmac->sdma->num_events)
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
}
@@ -812,9 +912,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
- init_completion(&sdmac->done);
-
return 0;
out:
@@ -1120,15 +1217,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- dma_cookie_t last_used;
-
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
@@ -1218,19 +1312,6 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->devtype) {
- case IMX31_SDMA:
- sdma->num_events = 32;
- break;
- case IMX35_SDMA:
- sdma->num_events = 48;
- break;
- default:
- dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
- sdma->devtype);
- return -ENODEV;
- }
-
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
@@ -1257,7 +1338,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
/* disable all channels */
- for (i = 0; i < sdma->num_events; i++)
+ for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
/* All channels have priority 0 */
@@ -1335,10 +1416,21 @@ static int __init sdma_probe(struct platform_device *pdev)
int ret;
int irq;
struct resource *iores;
- struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
+ const struct sdma_driver_data *drvdata = NULL;
+
+ if (of_id)
+ drvdata = of_id->data;
+ else if (pdev->id_entry)
+ drvdata = (void *)pdev->id_entry->driver_data;
+
+ if (!drvdata) {
+ dev_err(&pdev->dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
@@ -1347,6 +1439,7 @@ static int __init sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
+ sdma->drvdata = drvdata;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1396,10 +1489,6 @@ static int __init sdma_probe(struct platform_device *pdev)
for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
saddr_arr[i] = -EINVAL;
- if (of_id)
- pdev->id_entry = of_id->data;
- sdma->devtype = pdev->id_entry->driver_data;
-
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1431,6 +1520,8 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
+ if (sdma->drvdata->script_addrs)
+ sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
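
The probe-time lookup added above follows the usual dual-table match pattern: device-tree probes deliver the per-SoC data through the of_device_id table, legacy platform-bus probes through the platform_device_id table. A minimal sketch, as a hypothetical helper over the structures this patch defines:

static const struct sdma_driver_data *
sdma_get_drvdata(struct platform_device *pdev, const struct of_device_id *of_id)
{
	if (of_id)			/* device-tree probe */
		return of_id->data;
	if (pdev->id_entry)		/* platform-bus probe */
		return (void *)pdev->id_entry->driver_data;
	return NULL;			/* caller fails with -EINVAL */
}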
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index cc727ec78c4..dd8b44a56e5 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
struct iop_adma_desc_slot *slot = NULL;
int init = iop_chan->slots_allocated ? 0 : 1;
struct iop_adma_platform_data *plat_data =
- iop_chan->device->pdev->dev.platform_data;
+ dev_get_platdata(&iop_chan->device->pdev->dev);
int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
/* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev)
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
struct iop_adma_chan *iop_chan;
- struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
dma_async_device_unregister(&device->common);
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev)
struct iop_adma_device *adev;
struct iop_adma_chan *iop_chan;
struct dma_device *dma_dev;
- struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
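
The conversions here (and in the imx-sdma and mv_xor hunks) swap open-coded platform_data dereferences for the accessor, which is just a thin inline in linux/device.h, roughly:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}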
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index d39c2cd0795..cb9c0bc317e 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
- if (cookie != chan->cookie)
- return DMA_ERROR;
- return DMA_SUCCESS;
+ return dma_cookie_status(chan, cookie, txstate);
}
static int __init ipu_idmac_init(struct ipu *ipu)
@@ -1767,7 +1764,6 @@ static int ipu_remove(struct platform_device *pdev)
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
tasklet_kill(&ipu->tasklet);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
new file mode 100644
index 00000000000..a2c330f5f95
--- /dev/null
+++ b/drivers/dma/k3dma.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "k3-dma"
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1ffc
+
+#define INT_STAT 0x00
+#define INT_TC1 0x04
+#define INT_ERR1 0x0c
+#define INT_ERR2 0x10
+#define INT_TC1_MASK 0x18
+#define INT_ERR1_MASK 0x20
+#define INT_ERR2_MASK 0x24
+#define INT_TC1_RAW 0x600
+#define INT_ERR1_RAW 0x608
+#define INT_ERR2_RAW 0x610
+#define CH_PRI 0x688
+#define CH_STAT 0x690
+#define CX_CUR_CNT 0x704
+#define CX_LLI 0x800
+#define CX_CNT 0x810
+#define CX_SRC 0x814
+#define CX_DST 0x818
+#define CX_CFG 0x81c
+#define AXI_CFG 0x820
+#define AXI_CFG_DEFAULT 0x201201
+
+#define CX_LLI_CHAIN_EN 0x2
+#define CX_CFG_EN 0x1
+#define CX_CFG_MEM2PER (0x1 << 2)
+#define CX_CFG_PER2MEM (0x2 << 2)
+#define CX_CFG_SRCINCR (0x1 << 31)
+#define CX_CFG_DSTINCR (0x1 << 30)
+
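+/*
+ * Hardware link-list item (LLI): the controller fetches these 32-byte
+ * aligned records through CX_LLI. Bit 1 of .lli (CX_LLI_CHAIN_EN)
+ * chains to the next item; a zero .lli terminates the chain.
+ */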
+struct k3_desc_hw {
+ u32 lli;
+ u32 reserved[3];
+ u32 count;
+ u32 saddr;
+ u32 daddr;
+ u32 config;
+} __aligned(32);
+
+struct k3_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct k3_desc_hw desc_hw[0];
+};
+
+struct k3_dma_phy;
+
+struct k3_dma_chan {
+ u32 ccfg;
+ struct virt_dma_chan vc;
+ struct k3_dma_phy *phy;
+ struct list_head node;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+};
+
+struct k3_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct k3_dma_chan *vchan;
+ struct k3_dma_desc_sw *ds_run;
+ struct k3_dma_desc_sw *ds_done;
+};
+
+struct k3_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ struct list_head chan_pending;
+ struct k3_dma_phy *phy;
+ struct k3_dma_chan *chans;
+ struct clk *clk;
+ u32 dma_channels;
+ u32 dma_requests;
+};
+
+#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+
+static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct k3_dma_chan, vc.chan);
+}
+
+static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
+{
+ u32 val = 0;
+
+ if (on) {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val |= CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ } else {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val &= ~CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ }
+}
+
+static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
+{
+ u32 val = 0;
+
+ k3_dma_pause_dma(phy, false);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_ERR1_RAW);
+ writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
+static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
+{
+ writel_relaxed(hw->lli, phy->base + CX_LLI);
+ writel_relaxed(hw->count, phy->base + CX_CNT);
+ writel_relaxed(hw->saddr, phy->base + CX_SRC);
+ writel_relaxed(hw->daddr, phy->base + CX_DST);
+ writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+ writel_relaxed(hw->config, phy->base + CX_CFG);
+}
+
+static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
+{
+ u32 cnt = 0;
+
+ cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+ cnt &= 0xffff;
+ return cnt;
+}
+
+static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
+{
+ return readl_relaxed(d->base + CH_STAT);
+}
+
+static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
+{
+ if (on) {
+ /* set same priority */
+ writel_relaxed(0x0, d->base + CH_PRI);
+
+ /* unmask irq */
+ writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+ } else {
+ /* mask irq */
+ writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+ }
+}
+
+static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c;
+ u32 stat = readl_relaxed(d->base + INT_STAT);
+ u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 err1 = readl_relaxed(d->base + INT_ERR1);
+ u32 err2 = readl_relaxed(d->base + INT_ERR2);
+ u32 i, irq_chan = 0;
+
+ while (stat) {
+ i = __ffs(stat);
+ stat &= (stat - 1);
+ if (likely(tc1 & BIT(i))) {
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ irq_chan |= BIT(i);
+ }
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ dev_warn(d->slave.dev, "DMA ERR\n");
+ }
+
+ writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(err1, d->base + INT_ERR1_RAW);
+ writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+ if (irq_chan) {
+ tasklet_schedule(&d->task);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
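+/*
+ * Hand the next issued descriptor to the bound physical channel.
+ * Returns -EAGAIN when no pchan is attached or the pchan is still
+ * busy per CH_STAT; the tasklet retries later.
+ */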
+static int k3_dma_start_txd(struct k3_dma_chan *c)
+{
+ struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+ /*
+ * fetch and remove request from vc->desc_issued
+ * so vc->desc_issued only contains desc pending
+ */
+ list_del(&ds->vd.node);
+ c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
+ /* start dma */
+ k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
+ return 0;
+ }
+ c->phy->ds_done = NULL;
+ c->phy->ds_run = NULL;
+ return -EAGAIN;
+}
+
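+/*
+ * Deferred work: first recycle physical channels whose descriptor has
+ * completed, then bind free physical channels to virtual channels
+ * waiting on d->chan_pending and start their first transfer.
+ */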
+static void k3_dma_tasklet(unsigned long arg)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+
+ /* check new dma request of running channel in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
+ p = c->phy;
+ if (p && p->ds_done) {
+ if (k3_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->vc.lock);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct k3_dma_chan, node);
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << pch;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irq(&c->vc.lock);
+ k3_dma_start_txd(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+ }
+ }
+}
+
+static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void k3_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct k3_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
+ } else if ((!p) || (!p->ds_run)) {
+ bytes = 0;
+ } else {
+ struct k3_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
+ bytes = k3_dma_get_curr_cnt(d, p);
+ clli = k3_dma_get_curr_lli(p);
+ index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].count;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
+
+static void k3_dma_issue_pending(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy) {
+ if (list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ /* check in tasklet */
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ }
+ spin_unlock(&d->lock);
+ } else
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if ((num + 1) < ds->desc_num)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct k3_desc_hw);
+ ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+ ds->desc_hw[num].count = len;
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].config = ccfg;
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->size = len;
+ ds->desc_num = num;
+ num = 0;
+
+ if (!c->ccfg) {
+ /* default is memtomem, without calling device_control */
+ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+ c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
+ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
+ }
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ if (c->dir == DMA_MEM_TO_DEV) {
+ src += copy;
+ } else if (c->dir == DMA_DEV_TO_MEM) {
+ dst += copy;
+ } else {
+ src += copy;
+ dst += copy;
+ }
+ len -= copy;
+ } while (len);
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+ if (!sgl)
+ return NULL;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->desc_num = num;
+ num = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct dma_slave_config *cfg = (void *)arg;
+ struct k3_dma_phy *p = c->phy;
+ unsigned long flags;
+ u32 maxburst = 0, val = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ if (cfg == NULL)
+ return -EINVAL;
+ c->dir = cfg->direction;
+ if (c->dir == DMA_DEV_TO_MEM) {
+ c->ccfg = CX_CFG_DSTINCR;
+ c->dev_addr = cfg->src_addr;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ } else if (c->dir == DMA_MEM_TO_DEV) {
+ c->ccfg = CX_CFG_SRCINCR;
+ c->dev_addr = cfg->dst_addr;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ }
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ val = __ffs(width);
+ break;
+ default:
+ val = 3;
+ break;
+ }
+ c->ccfg |= (val << 12) | (val << 16);
+
+ if ((maxburst == 0) || (maxburst > 16))
+ val = 16;
+ else
+ val = maxburst - 1;
+ c->ccfg |= (val << 20) | (val << 24);
+ c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+ /* specific request line */
+ c->ccfg |= c->vc.chan.chan_id << 4;
+ break;
+
+ case DMA_TERMINATE_ALL:
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ k3_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ p->ds_run = p->ds_done = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+ if (p) {
+ k3_dma_pause_dma(p, false);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+ break;
+
+ case DMA_RESUME:
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+ if (p) {
+ k3_dma_pause_dma(p, true);
+ } else if (!list_empty(&c->vc.desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ break;
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+
+ kfree(ds);
+}
+
+static struct of_device_id k3_pdma_dt_ids[] = {
+ { .compatible = "hisilicon,k3-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
+
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
+
+static int k3_dma_probe(struct platform_device *op)
+{
+ struct k3_dma_dev *d;
+ const struct of_device_id *of_id;
+ struct resource *iores;
+ int i, ret, irq = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
+ if (of_id) {
+ of_property_read_u32(op->dev.of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32(op->dev.of_node,
+ "dma-requests", &d->dma_requests);
+ }
+
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
+
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(&op->dev, irq,
+ k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
+ if (d->phy == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct k3_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
+ d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
+ d->slave.device_tx_status = k3_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+ d->slave.device_issue_pending = k3_dma_issue_pending;
+ d->slave.device_control = k3_dma_control;
+ d->slave.copy_align = DMA_ALIGN;
+ d->slave.chancnt = d->dma_requests;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct k3_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = k3_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ k3_dma_enable_dma(d, true);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(op->dev.of_node,
+ k3_of_dma_simple_xlate, d);
+ if (ret)
+ goto of_dma_register_fail;
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+ platform_set_drvdata(op, d);
+ dev_info(&op->dev, "initialized\n");
+
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+ return ret;
+}
+
+static int k3_dma_remove(struct platform_device *op)
+{
+ struct k3_dma_chan *c, *cn;
+ struct k3_dma_dev *d = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free(op->dev.of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+ tasklet_kill(&d->task);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_suspend(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = k3_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "channel(s) still running (stat 0x%x), unable to suspend\n", stat);
+ return -EBUSY;
+ }
+ k3_dma_enable_dma(d, false);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_resume(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ k3_dma_enable_dma(d, true);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+
+static struct platform_driver k3_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &k3_dma_pmops,
+ .of_match_table = k3_pdma_dt_ids,
+ },
+ .probe = k3_dma_probe,
+ .remove = k3_dma_remove,
+};
+
+module_platform_driver(k3_pdma_driver);
+
+MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_ALIAS("platform:k3dma");
+MODULE_LICENSE("GPL v2");
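
Client drivers reach this controller through the generic dmaengine slave API; a hedged sketch of the consumer side (the "rx" channel name and FIFO address are placeholders, not defined by this patch):

static int example_start_rx(struct device *dev, dma_addr_t fifo_phys,
			    struct scatterlist *sgl, int nents)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,	/* placeholder FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");	/* DT dmas/dma-names */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}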
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f9c4d..ff8d7827f8c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -47,6 +49,8 @@
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
+ (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
@@ -69,7 +73,7 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT 3
-#define PDMA_MAX_DESC_BYTES 0x1000
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
@@ -94,6 +98,9 @@ struct mmp_pdma_chan {
struct mmp_pdma_phy *phy;
enum dma_transfer_direction dir;
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
/* channel's basic info */
struct tasklet_struct tasklet;
u32 dcmd;
@@ -105,6 +112,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel statue machine */
+ bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */
};
@@ -121,6 +129,7 @@ struct mmp_pdma_device {
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +146,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dalgn;
if (!phy->vchan)
return;
- reg = phy->vchan->drcmr;
- reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
reg = (phy->idx << 2) + DCSR;
writel(readl(phy->base + reg) | DCSR_RUN,
phy->base + reg);
@@ -218,7 +233,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
int prio, i;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
- struct mmp_pdma_phy *phy;
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
/*
* dma channel priorities
@@ -227,6 +243,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
* ch 8 - 11, 24 - 27 <--> (2)
* ch 12 - 15, 28 - 31 <--> (3)
*/
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
if (prio != ((i & 0xf) >> 2))
@@ -234,31 +252,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
phy = &pdev->phy[i];
if (!phy->vchan) {
phy->vchan = pchan;
- return phy;
+ found = phy;
+ goto out_unlock;
}
}
}
- return NULL;
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
}
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
- struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
- struct mmp_pdma_desc_sw *tail =
- to_mmp_pdma_desc(chan->chain_pending.prev);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
- if (list_empty(&chan->chain_pending))
- goto out_splice;
+ if (!pchan->phy)
+ return;
- /* one irq per queue, even appended */
- tail->desc.ddadr = desc->async_tx.phys;
- tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->phy->vchan->drcmr);
+ writel(0, pchan->phy->base + reg);
- /* softly link to pending list */
-out_splice:
- list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
@@ -277,10 +298,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
if (list_empty(&chan->chain_pending)) {
/* chance to re-fetch phy channel with higher prio */
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
dev_dbg(chan->dev, "no pending list\n");
return;
}
@@ -326,14 +344,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(&child->async_tx);
}
- append_pending_queue(chan, desc);
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
@@ -377,10 +397,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
return 1;
@@ -411,10 +428,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
chan->idle = true;
chan->dev_addr = 0;
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
return;
}
@@ -434,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL;
chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
}
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src;
@@ -486,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->cyclic_first = NULL;
+
return &first->async_tx;
fail:
@@ -509,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0))
return NULL;
+ chan->byte_align = false;
+
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
avail = sg_dma_len(sg);
do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
/* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan);
@@ -557,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr;
+ dma_dst = chan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr;
+ dma_src = chan->dev_addr;
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len);
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* make the cyclic link */
+ new->desc.ddadr = first->async_tx.phys;
+ chan->cyclic_first = first;
+
return &first->async_tx;
fail:
@@ -581,10 +692,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
disable_chan(chan->phy);
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
spin_lock_irqsave(&chan->desc_lock, flags);
mmp_pdma_free_desc_list(chan, &chan->chain_pending);
mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
chan->dcmd |= DCMD_BURST32;
chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
+ /* FIXME: drivers should be ported over to use the filter
+ * function. Once that's done, the following two lines can
+ * be removed.
+ */
+ if (cfg->slave_id)
+ chan->drcmr = cfg->slave_id;
break;
default:
return -ENOSYS;
@@ -632,15 +745,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
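
Audio-style clients drive the new cyclic path added above through dmaengine_prep_dma_cyclic(); a hedged fragment (buffer, callback and their parameters are placeholders):

	/* ring of buf_len bytes, one interrupt every period_len bytes */
	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = period_elapsed;	/* placeholder */
		desc->callback_param = substream;	/* placeholder */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}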
/**
@@ -669,29 +774,51 @@ static void dma_do_tasklet(unsigned long data)
LIST_HEAD(chain_cleanup);
unsigned long flags;
- /* submit pending list; callback for each desc; free desc */
+ if (chan->cyclic_first) {
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ cb = desc->async_tx.callback;
+ cb_data = desc->async_tx.callback_param;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (cb)
+ cb(cb_data);
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->chain_running)) {
- dma_cookie_t cookie;
+ return;
+ }
- desc = to_mmp_pdma_desc(chan->chain_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_del(&desc->node);
+ list_add(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
}
/*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
*/
- list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
- /* the hardware is now idle and ready for more */
- chan->idle = true;
+ chan->idle = list_empty(&chan->chain_running);
/* Start any pending transactions automatically */
start_pending_queue(chan);
@@ -763,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate;
+
+retry:
+ candidate = NULL;
+
+ /* walk the list of channels registered with the current instance and
+ * find one that is currently unused */
+ list_for_each_entry(chan, &d->device.channels, device_node)
+ if (chan->client_count == 0) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ /* dma_get_slave_channel will return NULL if we lost a race between
+ * the lookup and the reservation */
+ chan = dma_get_slave_channel(candidate);
+
+ if (chan) {
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+ c->drcmr = dma_spec->args[0];
+ return chan;
+ }
+
+ goto retry;
+}
+
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
@@ -777,10 +937,9 @@ static int mmp_pdma_probe(struct platform_device *op)
return -ENOMEM;
pdev->dev = &op->dev;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
+ spin_lock_init(&pdev->phy_lock);
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(pdev->dev, iores);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
@@ -825,13 +984,15 @@ static int mmp_pdma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
- dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
pdev->device.dev = &op->dev;
pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
pdev->device.device_tx_status = mmp_pdma_tx_status;
pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_control = mmp_pdma_control;
pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -847,7 +1008,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return ret;
}
- dev_info(pdev->device.dev, "initialized\n");
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -867,6 +1038,19 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+ if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+ return false;
+
+ c->drcmr = *(unsigned int *) param;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 9b9366537d7..38cb517fb2e 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ tdmac->buf_len - tdmac->pos);
return tdmac->status;
}
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
tdev->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2d956732aa3..2fe43537733 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -556,15 +556,7 @@ static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&mchan->lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0ec086d2b6a..536dcb8ba5f 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -654,7 +654,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
return sw_desc ? &sw_desc->async_tx : NULL;
}
@@ -1171,7 +1171,7 @@ static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
struct mv_xor_device *xordev;
- struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
int i, ret;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 719593002ab..ccd13df841d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -23,7 +23,6 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
return container_of(chan, struct mxs_dma_chan, chan);
}
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_data *data = chan->private;
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- if (data)
- mxs_chan->chan_irq = data->chan_irq;
-
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
GFP_KERNEL);
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- dma_cookie_t last_used;
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
return mxs_chan->status;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 75334bdd2c5..0b88dd3d05f 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property missing or empty\n", __func__);
+ pr_err("%s: dma-names property of node '%s' missing or empty\n",
+ __func__, np->full_name);
return NULL;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 0bbdea5059f..61fdc54a3c8 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pch_dma_chan *pd_chan = to_pd_chan(chan);
- enum dma_status ret;
-
- spin_lock_irq(&pd_chan->lock);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irq(&pd_chan->lock);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
@@ -1036,3 +1029,4 @@ MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa645d82500..a562d24d20b 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@ struct dma_pl330_chan {
/* List of to be xfered descriptors */
struct list_head work_list;
+ /* List of completed descriptors */
+ struct list_head completed_list;
/* Pointer to the DMAC that manages this channel,
* NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
return container_of(tx, struct dma_pl330_desc, txd);
}
-static inline void free_desc_list(struct list_head *list)
-{
- struct dma_pl330_dmac *pdmac;
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- /* Finish off the work list */
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
- void *param;
-
- /* All desc in a list belong to same channel */
- pch = desc->pchan;
- callback = desc->txd.callback;
- param = desc->txd.callback_param;
-
- if (callback)
- callback(param);
-
- desc->pchan = NULL;
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- pdmac = pch->dmac;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
- list_splice_tail_init(list, &pdmac->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
-
- /* Change status to reload it */
- desc->status = PREP;
- pch = desc->pchan;
- callback = desc->txd.callback;
- if (callback)
- callback(desc->txd.callback_param);
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
- list_splice_tail_init(list, &pch->work_list);
- spin_unlock_irqrestore(&pch->lock, flags);
-}
-
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
- LIST_HEAD(list);
spin_lock_irqsave(&pch->lock, flags);
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data)
if (desc->status == DONE) {
if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
- list_move_tail(&desc->node, &list);
+ list_move_tail(&desc->node, &pch->completed_list);
}
/* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data)
/* Make sure the PL330 Channel thread is active */
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
- spin_unlock_irqrestore(&pch->lock, flags);
+ while (!list_empty(&pch->completed_list)) {
+ dma_async_tx_callback callback;
+ void *callback_param;
- if (pch->cyclic)
- handle_cyclic_desc_list(&list);
- else
- free_desc_list(&list);
+ desc = list_first_entry(&pch->completed_list,
+ struct dma_pl330_desc, node);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ if (pch->cyclic) {
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ } else {
+ desc->status = FREE;
+ list_move_tail(&desc->node, &pch->dmac->desc_pool);
+ }
+
+ if (callback) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&pch->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc, *_dt;
+ struct dma_pl330_desc *desc;
unsigned long flags;
struct dma_pl330_dmac *pdmac = pch->dmac;
struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
- list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
- desc->status = DONE;
- list_move_tail(&desc->node, &list);
+ list_for_each_entry(desc, &pch->work_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&list, &pdmac->desc_pool);
+ list_for_each_entry(desc, &pch->completed_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2814,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+ struct dma_pl330_desc *first)
+{
+ unsigned long flags;
+ struct dma_pl330_desc *desc;
+
+ if (!first)
+ return;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
- unsigned long flags;
int i;
dma_addr_t addr;
@@ -2842,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(pch->dmac->pif.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- if (!first)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
-
- while (!list_empty(&first->node)) {
- desc = list_entry(first->node.next,
- struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
- }
-
- list_move_tail(&first->node, &pdmac->desc_pool);
-
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ __pl330_giveback_desc(pdmac, first);
return NULL;
}
@@ -2896,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
+#define PL330_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2908,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
int i, ret, irq;
int num_chan;
- pdat = adev->dev.platform_data;
+ pdat = dev_get_platdata(&adev->dev);
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2971,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
+ INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
@@ -3000,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
+ pd->device_slave_caps = pl330_dma_device_slave_caps;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3015,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+ /*
+ * This is the limit for transfers with a buswidth of 1; larger
+ * buswidths will have larger limits.
+ */
+ ret = dma_set_max_seg_size(&adev->dev, 1900800);
+ if (ret)
+ dev_err(&adev->dev, "unable to set the seg size\n");
+
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee20c13..dadd9e010c0 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,13 @@ config SUDMAC
depends on SH_DMAE_BASE
help
Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+ tristate "Renesas R-Car HPB DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car series DMA controllers.
+
+config SHDMA_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138dde9..e856af23b78 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,9 @@
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+shdma-y := shdmac.o
+ifeq ($(CONFIG_OF),y)
+shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
+endif
+shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 00000000000..45a520281ce
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This file is based on drivers/dma/sh/shdma.c.
+ *
+ * Renesas SuperH DMA Engine support
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The SuperH DMAC has no hardware DMA chain mode.
+ * - The maximum DMA size is 16 MB.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
+#include <linux/slab.h>
+
+/* DMA channel registers */
+#define HPB_DMAE_DSAR0 0x00
+#define HPB_DMAE_DDAR0 0x04
+#define HPB_DMAE_DTCR0 0x08
+#define HPB_DMAE_DSAR1 0x0C
+#define HPB_DMAE_DDAR1 0x10
+#define HPB_DMAE_DTCR1 0x14
+#define HPB_DMAE_DSASR 0x18
+#define HPB_DMAE_DDASR 0x1C
+#define HPB_DMAE_DTCSR 0x20
+#define HPB_DMAE_DPTR 0x24
+#define HPB_DMAE_DCR 0x28
+#define HPB_DMAE_DCMDR 0x2C
+#define HPB_DMAE_DSTPR 0x30
+#define HPB_DMAE_DSTSR 0x34
+#define HPB_DMAE_DDBGR 0x38
+#define HPB_DMAE_DDBGR2 0x3C
+#define HPB_DMAE_CHAN(n) (0x40 * (n))
+
+/* DMA command register (DCMDR) bits */
+#define HPB_DMAE_DCMDR_BDOUT BIT(7)
+#define HPB_DMAE_DCMDR_DQSPD BIT(6)
+#define HPB_DMAE_DCMDR_DQSPC BIT(5)
+#define HPB_DMAE_DCMDR_DMSPD BIT(4)
+#define HPB_DMAE_DCMDR_DMSPC BIT(3)
+#define HPB_DMAE_DCMDR_DQEND BIT(2)
+#define HPB_DMAE_DCMDR_DNXT BIT(1)
+#define HPB_DMAE_DCMDR_DMEN BIT(0)
+
+/* DMA forced stop register (DSTPR) bits */
+#define HPB_DMAE_DSTPR_DMSTP BIT(0)
+
+/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DMSTS BIT(0)
+
+/* DMA common registers */
+#define HPB_DMAE_DTIMR 0x00
+#define HPB_DMAE_DINTSR0 0x0C
+#define HPB_DMAE_DINTSR1 0x10
+#define HPB_DMAE_DINTCR0 0x14
+#define HPB_DMAE_DINTCR1 0x18
+#define HPB_DMAE_DINTMR0 0x1C
+#define HPB_DMAE_DINTMR1 0x20
+#define HPB_DMAE_DACTSR0 0x24
+#define HPB_DMAE_DACTSR1 0x28
+#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
+#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
+#define HPB_DMAE_HPB_DMLVLR0 0x160
+#define HPB_DMAE_HPB_DMLVLR1 0x164
+#define HPB_DMAE_HPB_DMSHPT0 0x168
+#define HPB_DMAE_HPB_DMSHPT1 0x16C
+
+#define HPB_DMA_SLAVE_NUMBER 256
+#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
+
+struct hpb_dmae_chan {
+ struct shdma_chan shdma_chan;
+ int xfer_mode; /* DMA transfer mode */
+#define XFER_SINGLE 1
+#define XFER_DOUBLE 2
+ unsigned plane_idx; /* current DMA information set */
+ bool first_desc; /* first/next transfer */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+ const struct hpb_dmae_slave_config *cfg;
+ char dev_id[16]; /* unique channel name per DMAC */
+};
+
+struct hpb_dmae_device {
+ struct shdma_dev shdma_dev;
+ spinlock_t reg_lock; /* comm_reg operation lock */
+ struct hpb_dmae_pdata *pdata;
+ void __iomem *chan_reg;
+ void __iomem *comm_reg;
+ void __iomem *reset_reg;
+ void __iomem *mode_reg;
+};
+
+struct hpb_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct hpb_desc {
+ struct shdma_desc shdma_desc;
+ struct hpb_dmae_regs hw;
+ unsigned plane_idx;
+};
+
+#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
+#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct hpb_dmae_device, shdma_dev.dma_dev)
+
+static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
+{
+ iowrite32(data, hpb_dc->base + reg);
+}
+
+static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
+{
+ return ioread32(hpb_dc->base + reg);
+}
+
+static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
+}
+
+static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
+}
+
+static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 v;
+
+ if (ch < 32)
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
+ else
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
+ return v & 0x1;
+}
+
+static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ if (ch < 32)
+ iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
+ else
+ iowrite32((0x1 << (ch - 32)),
+ hpbdev->comm_reg + HPB_DMAE_DINTCR1);
+}
+
+static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->mode_reg);
+}
+
+static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
+{
+ return ioread32(hpbdev->mode_reg);
+}
+
+static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 intreg;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ if (ch < 32) {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ iowrite32(BIT(ch) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ } else {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ iowrite32(BIT(ch - 32) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ }
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ u32 rstr;
+ int timeout = 10000; /* 100 ms */
+
+ spin_lock(&hpbdev->reg_lock);
+ rstr = ioread32(hpbdev->reset_reg);
+ rstr |= data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ do {
+ rstr = ioread32(hpbdev->reset_reg);
+ if ((rstr & data) == data)
+ break;
+ udelay(10);
+ } while (timeout--);
+
+ if (timeout < 0)
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "%s timeout\n", __func__);
+
+ rstr &= ~data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ spin_unlock(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
+ u32 mask, u32 data)
+{
+ u32 mode;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ mode = asyncmdr_read(hpbdev);
+ mode &= ~mask;
+ mode |= data;
+ asyncmdr_write(hpbdev, mode);
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
+{
+ dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
+}
+
+static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
+{
+ u32 ch;
+
+ for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
+ hsrstr_write(hpbdev, ch);
+}
+
+static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
+ int i;
+
+ switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
+ case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
+ default:
+ i = XMIT_SZ_8BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
+ i = XMIT_SZ_16BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
+ i = XMIT_SZ_32BIT;
+ break;
+ }
+ return pdata->ts_shift[i];
+}
+
+static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
+ struct hpb_dmae_regs *hw, unsigned plane)
+{
+ ch_reg_write(hpb_chan, hw->sar,
+ plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
+ ch_reg_write(hpb_chan, hw->dar,
+ plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
+ ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
+ plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+}
+
+static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
+{
+ ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
+ HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
+}
+
+static void hpb_dmae_halt(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
+ ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+}
+
+static const struct hpb_dmae_slave_config *
+hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int i;
+
+ if (slave_id >= HPB_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0; i < pdata->num_slaves; i++)
+ if (pdata->slaves[i].id == slave_id)
+ return pdata->slaves + i;
+
+ return NULL;
+}
+
+static void hpb_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
+ hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
+
+ desc->plane_idx = chan->plane_idx;
+ hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
+ hpb_dmae_start(chan, !chan->first_desc);
+
+ if (chan->xfer_mode == XFER_DOUBLE) {
+ chan->plane_idx ^= 1;
+ chan->first_desc = false;
+ }
+}
+
+static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ /*
+ * This is correct since we always have at most a single
+ * outstanding DMA transfer per channel, and by the time
+ * we get the completion interrupt the transfer has completed.
+ * This will change if we ever use alternating DMA
+ * information sets and submit two descriptors at once.
+ */
+ return true;
+}
+
+static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ int ch = chan->cfg->dma_ch;
+
+ /* Check Complete DMA Transfer */
+ if (dintsr_read(hpbdev, ch)) {
+ /* Clear Interrupt status */
+ dintcr_write(hpbdev, ch);
+ return true;
+ }
+ return false;
+}
+
+static int hpb_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (*len > (size_t)HPB_DMA_TCR_MAX)
+ *len = (size_t)HPB_DMA_TCR_MAX;
+
+ desc->hw.sar = src;
+ desc->hw.dar = dst;
+ desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 tcr = ch_reg_read(chan, desc->plane_idx ?
+ HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+
+ return (desc->hw.tcr - tcr) << chan->xmit_shift;
+}
+
+static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
+
+ return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+}
+
+static int
+hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
+ const struct hpb_dmae_slave_config *cfg)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ const struct hpb_dmae_channel *channel = pdata->channels;
+ int slave_id = cfg->id;
+ int i, err;
+
+ for (i = 0; i < pdata->num_channels; i++, channel++) {
+ if (channel->s_id == slave_id) {
+ struct device *dev = hpb_chan->shdma_chan.dev;
+
+ hpb_chan->base = hpbdev->chan_reg +
+ HPB_DMAE_CHAN(cfg->dma_ch);
+
+ dev_dbg(dev, "Detected Slave device\n");
+ dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
+ dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
+ dev_dbg(dev, " -- channel->ch_irq: %d\n",
+ channel->ch_irq);
+ break;
+ }
+ }
+
+ err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
+ IRQF_SHARED, hpb_chan->dev_id);
+ if (err) {
+ dev_err(hpb_chan->shdma_chan.dev,
+ "DMA channel request_irq %d failed with error %d\n",
+ channel->ch_irq, err);
+ return err;
+ }
+
+ hpb_chan->plane_idx = 0;
+ hpb_chan->first_desc = true;
+
+ if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
+ hpb_chan->xfer_mode = XFER_SINGLE;
+ } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
+ (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
+ hpb_chan->xfer_mode = XFER_DOUBLE;
+ } else {
+ dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
+ shdma_free_irq(&hpb_chan->shdma_chan);
+ return -EINVAL;
+ }
+
+ if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
+ hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
+ ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
+ ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
+ hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
+ hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
+
+ return 0;
+}
+
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ const struct hpb_dmae_slave_config *sc =
+ hpb_dmae_find_slave(chan, slave_id);
+
+ if (!sc)
+ return -ENODEV;
+ if (try)
+ return 0;
+ chan->cfg = sc;
+ return hpb_dmae_alloc_chan_resources(chan, sc);
+}
+
+static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ return chan->cfg->addr;
+}
+
+static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct hpb_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops hpb_dmae_ops = {
+ .desc_completed = hpb_dmae_desc_completed,
+ .halt_channel = hpb_dmae_halt,
+ .channel_busy = hpb_dmae_channel_busy,
+ .slave_addr = hpb_dmae_slave_addr,
+ .desc_setup = hpb_dmae_desc_setup,
+ .set_slave = hpb_dmae_set_slave,
+ .setup_xfer = hpb_dmae_setup_xfer,
+ .start_xfer = hpb_dmae_start_xfer,
+ .embedded_desc = hpb_dmae_embedded_desc,
+ .chan_irq = hpb_dmae_chan_irq,
+ .get_partial = hpb_dmae_get_partial,
+};
+
+static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
+{
+ struct shdma_dev *sdev = &hpbdev->shdma_dev;
+ struct platform_device *pdev =
+ to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
+ struct hpb_dmae_chan *new_hpb_chan;
+ struct shdma_chan *schan;
+
+ /* Alloc channel */
+ new_hpb_chan = devm_kzalloc(&pdev->dev,
+ sizeof(struct hpb_dmae_chan), GFP_KERNEL);
+ if (!new_hpb_chan) {
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "No free memory for allocating DMA channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &new_hpb_chan->shdma_chan;
+ shdma_chan_probe(sdev, schan, id);
+
+ if (pdev->id >= 0)
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dma.%d", id);
+
+ return 0;
+}
+
+static int hpb_dmae_probe(struct platform_device *pdev)
+{
+ struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
+ struct hpb_dmae_device *hpbdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *comm, *rest, *mode, *irq_res;
+ int err, i;
+
+ /* Get platform data */
+ if (!pdata || !pdata->num_channels)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res)
+ return -ENODEV;
+
+ hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
+ GFP_KERNEL);
+ if (!hpbdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return -ENOMEM;
+ }
+
+ hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(hpbdev->chan_reg))
+ return PTR_ERR(hpbdev->chan_reg);
+
+ hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
+ if (IS_ERR(hpbdev->comm_reg))
+ return PTR_ERR(hpbdev->comm_reg);
+
+ hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
+ if (IS_ERR(hpbdev->reset_reg))
+ return PTR_ERR(hpbdev->reset_reg);
+
+ hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
+ if (IS_ERR(hpbdev->mode_reg))
+ return PTR_ERR(hpbdev->mode_reg);
+
+ dma_dev = &hpbdev->shdma_dev.dma_dev;
+
+ spin_lock_init(&hpbdev->reg_lock);
+
+ /* Platform data */
+ hpbdev->pdata = pdata;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ /* Reset DMA controller */
+ hpb_dmae_reset(hpbdev);
+
+ pm_runtime_put(&pdev->dev);
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ hpbdev->shdma_dev.ops = &hpb_dmae_ops;
+ hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
+ err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
+ if (err < 0)
+ goto error;
+
+ /* Create DMA channels */
+ for (i = 0; i < pdata->num_channels; i++)
+ hpb_dmae_chan_probe(hpbdev, i);
+
+ platform_set_drvdata(pdev, hpbdev);
+ err = dma_async_device_register(dma_dev);
+ if (!err)
+ return 0;
+
+ shdma_cleanup(&hpbdev->shdma_dev);
+error:
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
+{
+ struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
+ BUG_ON(!schan);
+
+ shdma_free_irq(schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int hpb_dmae_remove(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ hpb_dmae_chan_remove(hpbdev);
+
+ return 0;
+}
+
+static void hpb_dmae_shutdown(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+ hpb_dmae_ctl_stop(hpbdev);
+}
+
+static struct platform_driver hpb_dmae_driver = {
+ .probe = hpb_dmae_probe,
+ .remove = hpb_dmae_remove,
+ .shutdown = hpb_dmae_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hpb-dma-engine",
+ },
+};
+module_platform_driver(hpb_dmae_driver);
+
+MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 00000000000..a2b8258426c
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,51 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define SH_DMAE_TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_512BIT] = 6, \
+}
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+
+#endif
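As a sanity check on the bit-splitting macros above, two values computed by hand (with TS_LOW_SHIFT = 3 and TS_HI_SHIFT = 18):

/*
 * TS_INDEX2VAL(XMIT_SZ_32BIT), i = 2:
 *   low  bits: (2 & 0x3) << 3  = 0x00000010
 *   high bits: (2 & 0xc) << 18 = 0x00000000
 *   result                     = 0x00000010
 *
 * TS_INDEX2VAL(XMIT_SZ_64BIT), i = 7:
 *   low  bits: (7 & 0x3) << 3  = 0x00000018
 *   high bits: (7 & 0xc) << 18 = 0x00100000
 *   result                     = 0x00100018
 */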
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 28ca3612163..d94ab592cc1 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,7 +171,8 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -179,7 +180,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (schan->dev->of_node) {
match = schan->hw_req;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
@@ -194,7 +195,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, match, false);
+ ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -236,7 +237,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, 0, true);
if (ret < 0)
return false;
@@ -259,7 +260,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id);
+ ret = shdma_setup_slave(schan, slave->slave_id, 0);
if (ret < 0)
goto esetslave;
} else {
@@ -680,7 +681,9 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel, while using it...
*/
config = (struct dma_slave_config *)arg;
- ret = shdma_setup_slave(schan, config->slave_id);
+ ret = shdma_setup_slave(schan, config->slave_id,
+ config->direction == DMA_DEV_TO_MEM ?
+ config->src_addr : config->dst_addr);
if (ret < 0)
return ret;
break;
@@ -831,8 +834,8 @@ static irqreturn_t chan_irqt(int irq, void *dev)
int shdma_request_irq(struct shdma_chan *schan, int irq,
unsigned long flags, const char *name)
{
- int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
- flags, name, schan);
+ int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+ chan_irqt, flags, name, schan);
schan->irq = ret < 0 ? ret : irq;
@@ -840,13 +843,6 @@ int shdma_request_irq(struct shdma_chan *schan, int irq,
}
EXPORT_SYMBOL(shdma_request_irq);
-void shdma_free_irq(struct shdma_chan *schan)
-{
- if (schan->irq >= 0)
- free_irq(schan->irq, schan);
-}
-EXPORT_SYMBOL(shdma_free_irq);
-
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id)
{
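The switch to devm_request_threaded_irq() above is what allows the shdma_free_irq() helper (removed in this hunk) to go away: a managed IRQ is released automatically when the device is unbound. A minimal sketch of the managed pattern, with illustrative names:

#include <linux/device.h>
#include <linux/interrupt.h>

static int example_attach_irq(struct device *dev, unsigned int irq,
			      irq_handler_t hard, irq_handler_t thread,
			      void *ctx)
{
	/* no matching free_irq() needed; devres releases it on detach */
	return devm_request_threaded_irq(dev, irq, hard, thread,
					 IRQF_SHARED, dev_name(dev), ctx);
}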
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 11bcb05cd79..06473a05fe4 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -42,12 +42,9 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
static int shdma_of_probe(struct platform_device *pdev)
{
- const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
int ret;
- if (!lookup)
- return -EINVAL;
-
ret = of_dma_controller_register(pdev->dev.of_node,
shdma_of_xlate, pdev);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
new file mode 100644
index 00000000000..4fb99970a3e
--- /dev/null
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -0,0 +1,77 @@
+/*
+ * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+#include <linux/sh_dma.h>
+
+#include "shdma-arm.h"
+
+const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;
+
+static const struct sh_dmae_slave_config dma_slaves[] = {
+ {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1, /* MMC0 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2, /* MMC0 Rx */
+ }, {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe1, /* MMC1 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe2, /* MMC1 Rx */
+ },
+};
+
+#define DMAE_CHANNEL(a, b) \
+ { \
+ .offset = (a) - 0x20, \
+ .dmars = (a) - 0x20 + 0x40, \
+ .chclr_bit = (b), \
+ .chclr_offset = 0x80 - 0x20, \
+ }
+
+static const struct sh_dmae_channel dma_channels[] = {
+ DMAE_CHANNEL(0x8000, 0),
+ DMAE_CHANNEL(0x8080, 1),
+ DMAE_CHANNEL(0x8100, 2),
+ DMAE_CHANNEL(0x8180, 3),
+ DMAE_CHANNEL(0x8200, 4),
+ DMAE_CHANNEL(0x8280, 5),
+ DMAE_CHANNEL(0x8300, 6),
+ DMAE_CHANNEL(0x8380, 7),
+ DMAE_CHANNEL(0x8400, 8),
+ DMAE_CHANNEL(0x8480, 9),
+ DMAE_CHANNEL(0x8500, 10),
+ DMAE_CHANNEL(0x8580, 11),
+ DMAE_CHANNEL(0x8600, 12),
+ DMAE_CHANNEL(0x8680, 13),
+ DMAE_CHANNEL(0x8700, 14),
+ DMAE_CHANNEL(0x8780, 15),
+ DMAE_CHANNEL(0x8800, 16),
+ DMAE_CHANNEL(0x8880, 17),
+ DMAE_CHANNEL(0x8900, 18),
+ DMAE_CHANNEL(0x8980, 19),
+};
+
+const struct sh_dmae_pdata r8a73a4_dma_pdata = {
+ .slave = dma_slaves,
+ .slave_num = ARRAY_SIZE(dma_slaves),
+ .channel = dma_channels,
+ .channel_num = ARRAY_SIZE(dma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+ .chclr_bitwise = 1,
+};
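For reference, the first table entry above expands as follows (values computed from the DMAE_CHANNEL() macro):

/*
 * DMAE_CHANNEL(0x8000, 0) ==
 *	{
 *		.offset       = 0x7fe0,   (0x8000 - 0x20)
 *		.dmars        = 0x8020,   (0x8000 - 0x20 + 0x40)
 *		.chclr_bit    = 0,
 *		.chclr_offset = 0x60,     (0x80 - 0x20)
 *	}
 */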
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9314e93225d..758a57b5187 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -28,18 +28,19 @@ struct sh_dmae_chan {
struct shdma_chan shdma_chan;
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
- u32 __iomem *base;
+ void __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ dma_addr_t slave_addr;
};
struct sh_dmae_device {
struct shdma_dev shdma_dev;
struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
- struct sh_dmae_pdata *pdata;
+ const struct sh_dmae_pdata *pdata;
struct list_head node;
- u32 __iomem *chan_reg;
- u16 __iomem *dmars;
+ void __iomem *chan_reg;
+ void __iomem *dmars;
unsigned int chcr_offset;
u32 chcr_ie_bit;
};
@@ -61,4 +62,11 @@ struct sh_dmae_desc {
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
+#ifdef CONFIG_SHDMA_R8A73A4
+extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
+#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
+#else
+#define r8a73a4_shdma_devid NULL
+#endif
+
#endif /* __DMA_SHDMA_H */
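The #ifdef stub above is a common pattern: the OF match table in shdmac.c can reference r8a73a4_shdma_devid unconditionally, and it simply resolves to NULL when r8a73a4 support is compiled out. A generic sketch with hypothetical names:

struct example_pdata;

#ifdef CONFIG_EXAMPLE_SOC			/* hypothetical option */
extern const struct example_pdata example_soc_pdata;
#define example_soc_devid (&example_soc_pdata)
#else
#define example_soc_devid NULL
#endif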
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c
index 5039fbc8825..1069e8869f2 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdmac.c
@@ -20,6 +20,8 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
@@ -35,6 +37,15 @@
#include "../dmaengine.h"
#include "shdma.h"
+/* DMA register */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+#define TEND 0x18 /* USB-DMAC */
+
#define SH_DMAE_DRV_NAME "sh-dma-engine"
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
@@ -49,27 +60,37 @@
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ * channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ * corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+ const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+ sh_dc->shdma_chan.id;
+ u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+ __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
- __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+ __raw_writel(data, sh_dc->base + reg);
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
- return __raw_readl(sh_dc->base + reg / sizeof(u32));
+ return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
return __raw_readl(addr);
@@ -79,7 +100,7 @@ static u16 dmaor_read(struct sh_dmae_device *shdev)
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
__raw_writel(data, addr);
@@ -91,14 +112,14 @@ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
@@ -133,7 +154,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
if (sh_chan)
- chclr_write(sh_chan, 0);
+ channel_clear(sh_chan);
}
}
@@ -167,7 +188,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -180,7 +201,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
for (i = 0; i < pdata->ts_shift_num; i++)
@@ -240,9 +261,9 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
- u16 __iomem *addr = shdev->dmars;
+ void __iomem *addr = shdev->dmars;
unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
@@ -253,8 +274,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
- addr = (u16 __iomem *)shdev->chan_reg;
- addr += chan_pdata->dmars / sizeof(u16);
+ addr = shdev->chan_reg;
+ addr += chan_pdata->dmars;
__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
addr);
@@ -309,7 +330,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
@@ -323,7 +344,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
} else {
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
if (cfg->mid_rid == match) {
- sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ sh_chan->shdma_chan.slave_id = i;
return cfg;
}
}
@@ -332,7 +353,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
- int slave_id, bool try)
+ int slave_id, dma_addr_t slave_addr, bool try)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
@@ -340,8 +361,10 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
if (!cfg)
return -ENXIO;
- if (!try)
+ if (!try) {
sh_chan->config = cfg;
+ sh_chan->slave_addr = slave_addr ? : cfg->addr;
+ }
return 0;
}
@@ -505,7 +528,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
struct shdma_chan *schan;
int err;
- sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+ GFP_KERNEL);
if (!sh_chan) {
dev_err(sdev->dma_dev.dev,
"No free memory for allocating dma channels!\n");
@@ -517,7 +541,7 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
shdma_chan_probe(sdev, schan, id);
- sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset;
/* set up channel irq */
if (pdev->id >= 0)
@@ -541,7 +565,6 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
err_no_irq:
/* remove from dmaengine device node */
shdma_chan_remove(schan);
- kfree(sh_chan);
return err;
}
@@ -552,14 +575,9 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
int i;
shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
- struct sh_dmae_chan *sh_chan = container_of(schan,
- struct sh_dmae_chan, shdma_chan);
BUG_ON(!schan);
- shdma_free_irq(&sh_chan->shdma_chan);
-
shdma_chan_remove(schan);
- kfree(sh_chan);
}
dma_dev->chancnt = 0;
}
@@ -636,7 +654,7 @@ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
* This is an exclusive slave DMA operation, may only be called after a
* successful slave configuration.
*/
- return sh_chan->config->addr;
+ return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
@@ -658,9 +676,15 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.get_partial = sh_dmae_get_partial,
};
+static const struct of_device_id sh_dmae_of_match[] = {
+ {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static int sh_dmae_probe(struct platform_device *pdev)
{
- struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ const struct sh_dmae_pdata *pdata;
unsigned long irqflags = IRQF_DISABLED,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -669,6 +693,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ if (pdev->dev.of_node)
+ pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
/* get platform data */
if (!pdata || !pdata->channel_num)
return -ENODEV;
@@ -696,33 +725,22 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!chan || !errirq_res)
return -ENODEV;
- if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
- dev_err(&pdev->dev, "DMAC register region already claimed\n");
- return -EBUSY;
- }
-
- if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
- dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
- err = -EBUSY;
- goto ermrdmars;
- }
-
- err = -ENOMEM;
- shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+ GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "Not enough memory\n");
- goto ealloc;
+ return -ENOMEM;
}
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = ioremap(chan->start, resource_size(chan));
- if (!shdev->chan_reg)
- goto emapchan;
+ shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(shdev->chan_reg))
+ return PTR_ERR(shdev->chan_reg);
if (dmars) {
- shdev->dmars = ioremap(dmars->start, resource_size(dmars));
- if (!shdev->dmars)
- goto emapdmars;
+ shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+ if (IS_ERR(shdev->dmars))
+ return PTR_ERR(shdev->dmars);
}
if (!pdata->slave_only)
@@ -783,8 +801,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
errirq = errirq_res->start;
- err = request_irq(errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev,
"DMA failed requesting irq #%d, error %d\n",
@@ -862,7 +880,6 @@ chan_probe_err:
sh_dmae_chan_remove(shdev);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
@@ -873,21 +890,9 @@ rst_err:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&shdev->shdma_dev);
eshdma:
- if (dmars)
- iounmap(shdev->dmars);
-emapdmars:
- iounmap(shdev->chan_reg);
synchronize_rcu();
-emapchan:
- kfree(shdev);
-ealloc:
- if (dmars)
- release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
- release_mem_region(chan->start, resource_size(chan));
return err;
}
@@ -896,14 +901,9 @@ static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
- struct resource *res;
- int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(dma_dev);
- if (errirq > 0)
- free_irq(errirq, shdev);
-
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irq(&sh_dmae_lock);
@@ -913,31 +913,11 @@ static int sh_dmae_remove(struct platform_device *pdev)
sh_dmae_chan_remove(shdev);
shdma_cleanup(&shdev->shdma_dev);
- if (shdev->dmars)
- iounmap(shdev->dmars);
- iounmap(shdev->chan_reg);
-
- platform_set_drvdata(pdev, NULL);
-
synchronize_rcu();
- kfree(shdev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, resource_size(res));
return 0;
}
-static const struct of_device_id sh_dmae_of_match[] = {
- { .compatible = "renesas,shdma", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index e7c94bbddb5..c7e9cdff070 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -150,7 +150,8 @@ static const struct sudmac_slave_config *sudmac_find_slave(
return NULL;
}
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
{
struct sudmac_chan *sc = to_chan(schan);
const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
@@ -298,11 +299,8 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
int i;
shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
- struct sudmac_chan *sc = to_chan(schan);
-
BUG_ON(!schan);
- shdma_free_irq(&sc->shdma_chan);
shdma_chan_remove(schan);
}
dma_dev->chancnt = 0;
@@ -335,7 +333,7 @@ static const struct shdma_ops sudmac_shdma_ops = {
static int sudmac_probe(struct platform_device *pdev)
{
- struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
int err, i;
struct sudmac_device *su_dev;
struct dma_device *dma_dev;
@@ -345,9 +343,8 @@ static int sudmac_probe(struct platform_device *pdev)
if (!pdata)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !irq_res)
+ if (!irq_res)
return -ENODEV;
err = -ENOMEM;
@@ -360,9 +357,10 @@ static int sudmac_probe(struct platform_device *pdev)
dma_dev = &su_dev->shdma_dev.dma_dev;
- su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
- if (!su_dev->chan_reg)
- return err;
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(su_dev->chan_reg))
+ return PTR_ERR(su_dev->chan_reg);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
@@ -373,7 +371,7 @@ static int sudmac_probe(struct platform_device *pdev)
return err;
/* platform data */
- su_dev->pdata = pdev->dev.platform_data;
+ su_dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, su_dev);
@@ -393,7 +391,6 @@ static int sudmac_probe(struct platform_device *pdev)
chan_probe_err:
sudmac_chan_remove(su_dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&su_dev->shdma_dev);
return err;
@@ -407,7 +404,6 @@ static int sudmac_remove(struct platform_device *pdev)
dma_async_device_unregister(dma_dev);
sudmac_chan_remove(su_dev);
shdma_cleanup(&su_dev->shdma_dev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
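The probe cleanups above (here and in shdmac.c) hinge on devm_ioremap_resource(): unlike the older devm_request_and_ioremap(), it returns an ERR_PTR() and logs its own diagnostics, so callers only propagate the error. A minimal sketch:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev, int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	/* returns an ERR_PTR() on failure, including res == NULL */
	return devm_ioremap_resource(&pdev->dev, res);
}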
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 716b23e4f32..6aec3ad814d 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan {
int mode;
};
+struct sirfsoc_dma_regs {
+ u32 ctrl[SIRFSOC_DMA_CHANNELS];
+ u32 interrupt_en;
+};
+
struct sirfsoc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
@@ -81,10 +87,13 @@ struct sirfsoc_dma {
int irq;
struct clk *clk;
bool is_marco;
+ struct sirfsoc_dma_regs regs_save;
};
#define DRV_NAME "sirfsoc_dma"
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
int i;
+ pm_runtime_get_sync(sdma->dma.dev);
+
/* Alloc descriptors for this channel */
for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_desc *sdesc, *tmp;
unsigned long flags;
LIST_HEAD(descs);
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
/* Free descriptors */
list_for_each_entry_safe(sdesc, tmp, &descs, node)
kfree(sdesc);
+
+ pm_runtime_put(sdma->dma.dev);
}
/* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
spin_unlock_irqrestore(&schan->lock, iflags);
if (!sdesc)
- return 0;
+ return NULL;
/* Place descriptor in prepared list */
spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op)
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
- clk_prepare_enable(sdma->clk);
-
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
+
ret = dma_async_device_register(dma);
if (ret)
goto free_irq;
+ pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
+ pm_runtime_disable(&op->dev);
+ if (!pm_runtime_status_suspended(&op->dev))
+ sirfsoc_dma_runtime_suspend(&op->dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sdma->clk);
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sdma->clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /*
+ * if we were runtime-suspended before, resume to enable the clock
+ * before accessing registers
+ */
+ if (pm_runtime_status_suspended(dev)) {
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * The DMA controller loses its register contents across suspend,
+ * so we need to save the registers of active channels
+ */
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ save->ctrl[ch] = readl_relaxed(sdma->base +
+ ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ }
+ save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /* Disable clock */
+ sirfsoc_dma_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /* Enable the clock before accessing registers */
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ writel_relaxed(sdesc->width,
+ sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+ writel_relaxed(sdesc->xlen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(save->ctrl[ch],
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->addr >> 2,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+ }
+
+ /* if we were runtime-suspended before, suspend again */
+ if (pm_runtime_status_suspended(dev))
+ sirfsoc_dma_runtime_suspend(dev);
+
return 0;
}
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &sirfsoc_dma_pm_ops,
.of_match_table = sirfsoc_dma_match,
},
};
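The sirf-dma changes above move clock handling into runtime-PM callbacks: channel allocation takes a usage reference, freeing drops it, and system sleep saves/restores channel registers around a full clock gate. The skeleton of that scheme, with illustrative types:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct example_dma {
	struct clk *clk;
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_dma *d = dev_get_drvdata(dev);

	clk_disable_unprepare(d->clk);
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_dma *d = dev_get_drvdata(dev);

	return clk_prepare_enable(d->clk);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};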
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5ab5880d5c9..82d2b97ad94 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2591,6 +2591,9 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
int i;
sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
+ if (!sg)
+ return NULL;
+
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -3139,7 +3142,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
struct resource *res = NULL;
@@ -3226,8 +3229,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
- "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
- rev, res->start, num_phy_chans, num_log_chans);
+ "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
+ rev, &res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
@@ -3485,7 +3488,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
- const const __be32 *list;
+ const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
@@ -3516,7 +3519,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
list = of_get_property(np, "disabled-channels", &num_disabled);
num_disabled /= sizeof(*list);
- if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+ if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
@@ -3535,7 +3538,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
static int __init d40_probe(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
struct d40_base *base = NULL;
@@ -3579,9 +3582,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- d40_err(&pdev->dev,
- "Failed to request LCPA region 0x%x-0x%x\n",
- res->start, res->end);
+ d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
goto failure;
}
@@ -3589,8 +3590,8 @@ static int __init d40_probe(struct platform_device *pdev)
val = readl(base->virtbase + D40_DREG_LCPA);
if (res->start != val && val != 0) {
dev_warn(&pdev->dev,
- "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
- __func__, val, res->start);
+ "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
+ __func__, val, &res->start);
} else
writel(res->start, base->virtbase + D40_DREG_LCPA);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f137914d7b1..5d4986e5f5f 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned long flags;
unsigned int residual;
- spin_lock_irqsave(&tdc->lock, flags);
-
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS) {
- spin_unlock_irqrestore(&tdc->lock, flags);
+ if (ret == DMA_SUCCESS)
return ret;
- }
+
+ spin_lock_irqsave(&tdc->lock, flags);
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
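The tegra20 reordering above relies on a completed cookie being final: if dma_cookie_status() already reports DMA_SUCCESS, nothing under the channel lock can change the answer, so the fast path returns without locking. A hedged sketch (dma_cookie_status() comes from the driver-internal drivers/dma/dmaengine.h; DMA_SUCCESS is this era's name for the terminal state):

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* driver-internal cookie helpers */

static enum dma_status example_tx_status(struct dma_chan *c,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	enum dma_status ret = dma_cookie_status(c, cookie, state);

	if (ret == DMA_SUCCESS)
		return ret;	/* final; no need to take the lock */

	/* slow path: lock the channel and inspect in-flight descriptors */
	return ret;
}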
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0ef43c136aa..28af214fce0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index a59fb4841d4..71e8e775189 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dc->lock);
- txx9dmac_scan_descriptors(dc);
- spin_unlock_bh(&dc->lock);
+ if (ret == DMA_SUCCESS)
+ return DMA_SUCCESS;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_bh(&dc->lock);
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock_bh(&dc->lock);
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
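Both the tegra20-apb and txx9 tx_status rewrites above follow the same pattern: dma_cookie_status() only reads the lock-free cookie bookkeeping, so the completed fast path needs no channel lock, and the lock is taken only when a residue has to be computed. A sketch, with to_foo_chan()/foo_update_residue() as hypothetical driver helpers:

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		struct foo_chan *fc = to_foo_chan(chan);	/* hypothetical */
		enum dma_status ret;

		/* fast path: a completed cookie needs no channel state */
		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_SUCCESS)
			return ret;

		/* slow path: compute the residue under the channel lock */
		spin_lock_bh(&fc->lock);
		foo_update_residue(fc, cookie, txstate);	/* hypothetical */
		spin_unlock_bh(&fc->lock);

		return dma_cookie_status(chan, cookie, txstate);
	}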
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev)
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
- struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+ struct txx9dmac_chan_platform_data *cpdata =
+ dev_get_platdata(&pdev->dev);
struct platform_device *dmac_dev = cpdata->dmac_dev;
- struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
struct txx9dmac_chan *dc;
int err;
int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
static int __init txx9dmac_probe(struct platform_device *pdev)
{
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *io;
struct txx9dmac_dev *ddev;
u32 mcr;
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 mcr;
mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 232fa8fce26..fa0affb699b 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -14,7 +14,7 @@
* of and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: http://www.dmtf.org/standards
*/
-static char dmi_empty_string[] = " ";
+static const char dmi_empty_string[] = " ";
static u16 __initdata dmi_ver;
/*
@@ -49,7 +49,7 @@ static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
return "";
}
-static char * __init dmi_string(const struct dmi_header *dm, u8 s)
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
{
const char *bp = dmi_string_nosave(dm, s);
char *str;
@@ -62,8 +62,6 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
str = dmi_alloc(len);
if (str != NULL)
strcpy(str, bp);
- else
- printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
return str;
}
@@ -133,17 +131,18 @@ static int __init dmi_checksum(const u8 *buf, u8 len)
return sum == 0;
}
-static char *dmi_ident[DMI_STRING_MAX];
+static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
/*
* Save a DMI string
*/
-static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string)
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+ int string)
{
- const char *d = (const char*) dm;
- char *p;
+ const char *d = (const char *) dm;
+ const char *p;
if (dmi_ident[slot])
return;
@@ -155,9 +154,10 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int str
dmi_ident[slot] = p;
}
-static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+ int index)
{
- const u8 *d = (u8*) dm + index;
+ const u8 *d = (u8 *) dm + index;
char *s;
int is_ff = 1, is_00 = 1, i;
@@ -188,12 +188,13 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
else
sprintf(s, "%pUB", d);
- dmi_ident[slot] = s;
+ dmi_ident[slot] = s;
}
-static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index)
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+ int index)
{
- const u8 *d = (u8*) dm + index;
+ const u8 *d = (u8 *) dm + index;
char *s;
if (dmi_ident[slot])
@@ -216,10 +217,8 @@ static void __init dmi_save_one_device(int type, const char *name)
return;
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
- if (!dev) {
- printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
+ if (!dev)
return;
- }
dev->type = type;
strcpy((char *)(dev + 1), name);
@@ -249,17 +248,14 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
struct dmi_device *dev;
for (i = 1; i <= count; i++) {
- char *devname = dmi_string(dm, i);
+ const char *devname = dmi_string(dm, i);
if (devname == dmi_empty_string)
continue;
dev = dmi_alloc(sizeof(*dev));
- if (!dev) {
- printk(KERN_ERR
- "dmi_save_oem_strings_devices: out of memory.\n");
+ if (!dev)
break;
- }
dev->type = DMI_DEV_TYPE_OEM_STRING;
dev->name = devname;
@@ -272,21 +268,17 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
{
struct dmi_device *dev;
- void * data;
+ void *data;
data = dmi_alloc(dm->length);
- if (data == NULL) {
- printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+ if (data == NULL)
return;
- }
memcpy(data, dm, dm->length);
dev = dmi_alloc(sizeof(*dev));
- if (!dev) {
- printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
+ if (!dev)
return;
- }
dev->type = DMI_DEV_TYPE_IPMI;
dev->name = "IPMI controller";
@@ -301,10 +293,9 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
struct dmi_dev_onboard *onboard_dev;
onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
- if (!onboard_dev) {
- printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n");
+ if (!onboard_dev)
return;
- }
+
onboard_dev->instance = instance;
onboard_dev->segment = segment;
onboard_dev->bus = bus;
@@ -320,7 +311,7 @@ static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
- const u8 *d = (u8*) dm + 5;
+ const u8 *d = (u8 *) dm + 5;
/* Skip disabled device */
if ((*d & 0x80) == 0)
@@ -338,7 +329,7 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
*/
static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
{
- switch(dm->type) {
+ switch (dm->type) {
case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
@@ -502,13 +493,7 @@ void __init dmi_scan_machine(void)
dmi_available = 1;
goto out;
}
- }
- else {
- /*
- * no iounmap() for that ioremap(); it would be a no-op, but
- * it's so early in setup that sucker gets confused into doing
- * what it shouldn't if we actually call it.
- */
+ } else {
p = dmi_ioremap(0xF0000, 0x10000);
if (p == NULL)
goto error;
@@ -533,7 +518,7 @@ void __init dmi_scan_machine(void)
dmi_iounmap(p, 0x10000);
}
error:
- printk(KERN_INFO "DMI not present or invalid.\n");
+ pr_info("DMI not present or invalid.\n");
out:
dmi_initialized = 1;
}
@@ -669,7 +654,7 @@ int dmi_name_in_serial(const char *str)
/**
* dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
- * @str: Case sensitive Name
+ * @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
@@ -696,13 +681,13 @@ EXPORT_SYMBOL(dmi_name_in_vendors);
* A new search is initiated by passing %NULL as the @from argument.
* If @from is not %NULL, searches continue from next device.
*/
-const struct dmi_device * dmi_find_device(int type, const char *name,
+const struct dmi_device *dmi_find_device(int type, const char *name,
const struct dmi_device *from)
{
const struct list_head *head = from ? &from->list : &dmi_devices;
struct list_head *d;
- for(d = head->next; d != &dmi_devices; d = d->next) {
+ for (d = head->next; d != &dmi_devices; d = d->next) {
const struct dmi_device *dev =
list_entry(d, struct dmi_device, list);
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index acba0b9f440..6eb535ffedd 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -525,7 +525,7 @@ static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
u32 data_type;
} param;
- rc = strict_strtoul(buf, 0, &val);
+ rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
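strict_strtoul() was deprecated in favour of the kstrto*() family, so the call above is a drop-in rename. For reference:

	unsigned long val;
	int rc;

	/* base 0 auto-detects a 0x (hex) or leading-0 (octal) prefix */
	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;	/* -EINVAL on garbage, -ERANGE on overflow */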
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ba9876ffb01..0dfaf20e4da 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -195,8 +195,8 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
return;
for (;; index++) {
- ret = of_parse_phandle_with_args(np, "gpio-ranges",
- "#gpio-range-cells", index, &pinspec);
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ index, &pinspec);
if (ret)
break;
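of_parse_phandle_with_fixed_args() differs from of_parse_phandle_with_args() in that the per-entry argument count is supplied by the caller (three cells here) instead of being read from a #gpio-range-cells property in the target node. Roughly:

	struct of_phandle_args pinspec;
	int ret;

	/* each gpio-ranges entry is <phandle gpio-base pin-base count>,
	 * i.e. a phandle followed by exactly three argument cells */
	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
					       index, &pinspec);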
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab9246e1b..a6f4cb5af18 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat, cagf;
+ u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_get(dev_priv);
+ reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ reqf *= GT_FREQUENCY_MULTIPLIER;
+
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa0915ce5..9b265a4c6a3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
- goto out;
+ if (!HAS_PCH_SPLIT(dev)) {
+ ret = vga_client_register(dev->pdev, dev, NULL,
+ i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+ }
intel_register_dsm_handler();
@@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
+ /*
+ * Must do this after fbcon init so that
+ * vgacon_save_screen() works during the handover.
+ */
+ i915_disable_vga_mem(dev);
+
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ead350..69d8ed5416c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
static struct drm_driver driver;
extern int intel_agp_enabled;
-#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = id, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = 0x16a, \
- .subvendor = 0x152d, \
- .subdevice = 0x8990, \
- .driver_data = (unsigned long) info }
-
-
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_vebox_ring = 1,
};
+/*
+ * Make sure any device matches here are ordered from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+ INTEL_I830_IDS(&intel_i830_info), \
+ INTEL_I845G_IDS(&intel_845g_info), \
+ INTEL_I85X_IDS(&intel_i85x_info), \
+ INTEL_I865G_IDS(&intel_i865g_info), \
+ INTEL_I915G_IDS(&intel_i915g_info), \
+ INTEL_I915GM_IDS(&intel_i915gm_info), \
+ INTEL_I945G_IDS(&intel_i945g_info), \
+ INTEL_I945GM_IDS(&intel_i945gm_info), \
+ INTEL_I965G_IDS(&intel_i965g_info), \
+ INTEL_G33_IDS(&intel_g33_info), \
+ INTEL_I965GM_IDS(&intel_i965gm_info), \
+ INTEL_GM45_IDS(&intel_gm45_info), \
+ INTEL_G45_IDS(&intel_g45_info), \
+ INTEL_PINEVIEW_IDS(&intel_pineview_info), \
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
+ INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+ INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
static const struct pci_device_id pciidlist[] = { /* aka */
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
- INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
- INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
- INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
- INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
- INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
- INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
- INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
- INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
- INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
- INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
- INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
- INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
- INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
- INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
- INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
- INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
- INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
- INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
- INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
- INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
- INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
- INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
- INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
- INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
- INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
- INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
- INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+ INTEL_PCI_IDS,
{0, 0, 0}
};
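The ordering constraint in the comment above matters because the PCI core returns the first table entry that matches; a wildcard subvendor/subdevice entry would otherwise shadow the Quanta-specific one. A sketch, assuming the PCI_DEVICE()/PCI_DEVICE_SUB() convenience macros from <linux/pci.h> and two hypothetical info structs:

	static const struct pci_device_id ids[] = {
		/* the subsystem-specific match must come first ... */
		{ PCI_DEVICE_SUB(0x8086, 0x016a, 0x152d, 0x8990),
		  .driver_data = (kernel_ulong_t)&quanta_info },
		/* ... or this PCI_ANY_ID wildcard would swallow it */
		{ PCI_DEVICE(0x8086, 0x016a),
		  .driver_data = (kernel_ulong_t)&generic_info },
		{ 0 }
	};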
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785a3fd..35874b3a86d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
+ /**
+ * wq - Driver workqueue for GEM.
+ *
+ * NOTE: Work items scheduled here are not allowed to grab any modeset
+ * locks, for otherwise the flushing done in the pageflip code will
+ * result in deadlocks.
+ */
struct workqueue_struct *wq;
/* Display functions */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10d846..d9e337feef1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+ return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}
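kmem_cache_zalloc() is simply the zeroing wrapper, so this is a no-op cleanup; from include/linux/slab.h:

	static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
	{
		return kmem_cache_alloc(k, flags | __GFP_ZERO);
	}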
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@ static long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
+ struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
- global_list) {
+ /*
+ * As we may completely rewrite the bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+	 * one element of the list at a time, and recheck the list
+ * on every iteration.
+ */
+ INIT_LIST_HEAD(&still_bound_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_bound_list);
+
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
+ /*
+ * Hold a reference whilst we unbind this object, as we may
+ * end up waiting for and retiring requests. This might
+ * release the final reference (held by the active list)
+ * and result in the object being freed from under us.
+ *
+ * Note 1: Shrinking the bound list is special since only active
+	 * (and hence bound) objects can contain such limbo objects, so
+ * we don't need special tricks for shrinking the unbound list.
+ * The only other place where we have to be careful with active
+ * objects suddenly disappearing due to retiring requests is the
+ * eviction code.
+ *
+ * Note 2: Even though the bound list doesn't hold a reference
+ * to the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equals 0.
+ */
+ drm_gem_object_reference(&obj->base);
+
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
- if (!i915_gem_object_put_pages(obj)) {
+ if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
+
+ drm_gem_object_unreference(&obj->base);
}
+ list_splice(&still_bound_list, &dev_priv->mm.bound_list);
return count;
}
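The structure of the loop above generalizes: when the per-element work may rewrite the source list, detach exactly one element onto a private list before touching it, then splice the survivors back at the end. A minimal sketch with hypothetical item/process names:

	LIST_HEAD(still_in_list);

	while (!list_empty(&src_list)) {
		struct item *it = list_first_entry(&src_list,
						   struct item, node);

		/* park it first: process() may reorder src_list at will */
		list_move_tail(&it->node, &still_in_list);
		process(it);
	}
	list_splice(&still_in_list, &src_list);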
@@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- sg_free_table(st);
kfree(st);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05fcbd..7d5752fda5f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = i915_gem_object_get_pages(obj);
- if (ret) {
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_unpin;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
- if (ret) {
- kfree(st);
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_free;
src = obj->pages->sgl;
dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- sg_free_table(st);
- kfree(st);
- st = ERR_PTR(-ENOMEM);
- goto out;
+		ret = -ENOMEM;
+ goto err_free_sg;
}
- i915_gem_object_pin_pages(obj);
-
-out:
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a235e..bf345777ae9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
else
ret = relocate_entry_gtt(obj, reloc);
+ if (ret)
+ return ret;
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b80f..e15a1d90037 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568d5b4..aba9d749899 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (WARN_ON(ring->id != RCS))
return NULL;
- obj = ring->private;
+ obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445ceb5..83cce0cdb76 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ schedule_work(&dev_priv->hotplug_work);
}
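For reference, schedule_work() just queues onto the shared system workqueue, which is exactly the point: a flush of the driver-private dev_priv->wq from the pageflip path can then never end up waiting on this modeset-lock-taking work item. From include/linux/workqueue.h:

	static inline bool schedule_work(struct work_struct *work)
	{
		return queue_work(system_wq, work);
	}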
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+ /*
+ * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+ * code will deadlock.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
- DRM_ERROR("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
+ DRM_INFO("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
rings_hung++;
}
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f720f9..c159e1a6810 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-
-
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -245,6 +230,7 @@
 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -693,6 +679,23 @@
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050
+#define DERRMR_PIPEA_SCANLINE (1<<0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
+#define DERRMR_PIPEA_VBLANK (1<<3)
+#define DERRMR_PIPEA_HBLANK (1<<5)
+#define DERRMR_PIPEB_SCANLINE (1<<8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
+#define DERRMR_PIPEB_VBLANK (1<<11)
+#define DERRMR_PIPEB_HBLANK (1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1<<14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
+#define DERRMR_PIPEC_VBLANK (1<<21)
+#define DERRMR_PIPEC_HBLANK (1<<22)
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f3b0d..c8c4112de11 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay));
+}
+
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_vlv_rpe_freq_mhz.attr,
+ NULL,
+};
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ ret = 0;
+ if (IS_VALLEYVIEW(dev))
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
- if (ret)
- DRM_ERROR("gen6 sysfs setup failed\n");
- }
+ if (ret)
+ DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
&error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (IS_VALLEYVIEW(dev))
+ sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875f22c..ea9022ef15d 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d82ac7..2489d0b4c7d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
else
dspcntr &= ~DISPPLANE_TILED;
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+ else
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
+ cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+ }
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
}
- pipe_config->adjusted_mode.clock = clock.dot *
- pipe_config->pixel_multiplier;
+ pipe_config->adjusted_mode.clock = clock.dot;
}
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ err:
return ret;
}
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrpts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
- int ret;
+ int len, ret;
+
+ ring = obj->ring;
+ if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto err_unpin;
}
- ret = intel_ring_begin(ring, 4);
+ len = 4;
+ if (ring->id == RCS)
+ len += 6;
+
+ ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
+ /* Unmask the flip-done completion message. Note that the bspec says that
+ * we should do this for both the BCS and RCS, and that we must not unmask
+ * more than one flip event at any time (or ensure that one flip message
+ * can be sent by waiting for flip-done prior to queueing new flips).
+ * Experimentation says that BCS works despite DERRMR masking all
+ * flip-done completion events and that unmasking all planes at once
+ * for the RCS also doesn't appear to drop events. Setting the DERRMR
+ * to zero does lead to lockups within MI_DISPLAY_FLIP.
+ */
+ if (ring->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ }
+
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+ /* Enable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+ /* Disable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
void intel_modeset_init_hw(struct drm_device *dev)
{
intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
+ i915_disable_vga_mem(dev);
}
}
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
+ i915_enable_vga_mem(dev);
+
intel_disable_gt_powersave(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 176080822a7..a47799e832c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278e31f..831a5c021c4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *fixed_mode =
- lvds_encoder->attached_connector->base.panel.fixed_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
- if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb68f09..119771ff46a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAILED;
intel_panel_set_backlight(dev, bclp, 255);
- iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+ iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
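DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d). Besides reading better than the magic 0x64/0xff, it rounds up, so the lowest non-zero backlight request no longer reports as 0%:

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* bclp = 1:   (1 * 0x64) / 0xff          = 0   (old, truncated)
	 *             DIV_ROUND_UP(1 * 100, 255) = 1   (new)
	 * bclp = 255: both forms yield 100 */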
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33bc4a3..42114ecbae0 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
+ drm_mode_copy(adjusted_mode, fixed_mode);
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 46056820d1d..0c115cc4899 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enabled_intrs;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
+
/* only unmask PM interrupts we need. Mask all others. */
- I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+ enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+ /* IVB and SNB hard hangs on looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
-
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05cceac5a5..460ee1026fc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/*
@@ -481,68 +468,43 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc;
- struct drm_i915_gem_object *obj;
int ret;
- if (ring->private)
+ if (ring->scratch.obj)
return 0;
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL) {
+ ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
- pc->cpu_page = kmap(sg_page(obj->pages->sgl));
- if (pc->cpu_page == NULL) {
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+ if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, pc->gtt_offset);
-
- pc->obj = obj;
- ring->private = pc;
+ ring->name, ring->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(ring->scratch.obj);
err_unref:
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&ring->scratch.obj->base);
err:
- kfree(pc);
return ret;
}
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
- struct pipe_control *pc = ring->private;
- struct drm_i915_gem_object *obj;
-
- obj = pc->obj;
-
- kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
-
- kfree(pc);
-}
-
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- if (!ring->private)
+ if (ring->scratch.obj == NULL)
return;
- if (HAS_BROKEN_CS_TLB(dev))
- drm_gem_object_unreference(to_gem_object(ring->private));
-
- if (INTEL_INFO(dev)->gen >= 5)
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_unpin(ring->scratch.obj);
+ }
- ring->private = NULL;
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
}
static void
@@ -742,8 +704,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct pipe_control *pc = ring->private;
- return pc->cpu_page[0];
+ return ring->scratch.cpu_page[0];
}
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct pipe_control *pc = ring->private;
- pc->cpu_page[0] = seqno;
+ ring->scratch.cpu_page[0] = seqno;
}
static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
- struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+ u32 cs_offset = ring->scratch.gtt_offset;
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->private = obj;
+ ring->scratch.obj = obj;
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad5311ba..68b1ca974d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
struct intel_ring_hangcheck hangcheck;
- void *private;
+ struct {
+ struct drm_i915_gem_object *obj;
+ u32 gtt_offset;
+ volatile u32 *cpu_page;
+ } scratch;
};
static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058fb3c..85037b9d493 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (intel_crtc->config.pixel_multiplier) {
+ switch (crtc->config.pixel_multiplier) {
default:
		WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621cdd10..ad6ec4b3900 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
- /* must disable */
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+ else
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
sprctl |= SPRITE_ENABLE;
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc869c02..8649f1c36b0 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
}
}
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ intel_uncore_forcewake_reset(dev);
+
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644024b..e893c536240 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e1937..af025970835 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (!conflict->bridge_has_one_vga) {
vga_irq_set_state(conflict, false);
flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
- if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
- conflict->owns &= ~lwants;
+ conflict->owns &= ~match;
/* If it also owned non-legacy, that is no longer the case */
- if (lwants & VGA_RSRC_LEGACY_MEM)
+ if (match & VGA_RSRC_LEGACY_MEM)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (lwants & VGA_RSRC_LEGACY_IO)
+ if (match & VGA_RSRC_LEGACY_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
@@ -644,10 +644,12 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
- int old_decodes;
- struct vga_device *new_vgadev, *conflict;
+ int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
+ decodes_removed = ~new_decodes & old_decodes;
+ decodes_unlocked = vgadev->locks & decodes_removed;
+ vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
-
- /* if we own the decodes we should move them along to
- another card */
- if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
- /* set us to own nothing */
- vgadev->owns &= ~old_decodes;
- list_for_each_entry(new_vgadev, &vga_list, list) {
- if ((new_vgadev != vgadev) &&
- (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
- conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
- if (!conflict)
- __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
- break;
- }
- }
+	/* If we removed locked decodes, reset the lock counts to zero and release them */
+ if (decodes_unlocked) {
+ if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt = 0;
+ if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt = 0;
+ __vga_put(vgadev, decodes_unlocked);
}
/* change decodes counter */
- if (old_decodes != new_decodes) {
- if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
- vga_decode_count++;
- else
- vga_decode_count--;
- }
+ if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+ !(new_decodes & VGA_RSRC_LEGACY_MASK))
+ vga_decode_count--;
+ if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+ new_decodes & VGA_RSRC_LEGACY_MASK)
+ vga_decode_count++;
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
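
The rework above drops the old "transfer ownership to another card" loop in favour of plain bitmask bookkeeping: compute which decodes were removed, which of those were locked, and release exactly that set. A distilled sketch under hypothetical names:

	/* Sketch of the decode bookkeeping: derive the removed and
	 * still-locked masks, then drop ownership of what was removed. */
	static void update_decodes(unsigned int *owns, unsigned int *locks,
				   unsigned int old_decodes, unsigned int new_decodes)
	{
		unsigned int removed  = old_decodes & ~new_decodes;
		unsigned int unlocked = *locks & removed;

		*owns &= ~removed;	/* can't own what we no longer decode */
		*locks &= ~unlocked;	/* force-release locks on removed decodes */
	}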
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 89cfd64b337..ef91b8a6754 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -246,7 +246,7 @@ static struct vrm_model vrm_models[] = {
*/
static u8 get_via_model_d_vrm(void)
{
- unsigned int vid, brand, dummy;
+ unsigned int vid, brand, __maybe_unused dummy;
static const char *brands[4] = {
"C7-M", "C7", "Eden", "C7-D"
};
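
Marking `dummy` as `__maybe_unused` is the idiomatic fix for "set but not used" warnings when an out-parameter is deliberately ignored, which is what the one-line change above does. A hedged sketch (the MSR number is made up):

	/* rdmsr() fills both halves; the high word is not needed here. */
	static u8 read_vid_low_byte(void)
	{
		unsigned int vid, __maybe_unused dummy;

		rdmsr(0x1153, vid, dummy);	/* hypothetical MSR */
		return vid & 0xff;
	}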
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 18c062360ca..70a39a8ac01 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -233,8 +233,7 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENOMEM;
if (dev_get_platdata(&client->dev)) {
- pdata =
- (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
+ pdata = dev_get_platdata(&client->dev);
shunt = pdata->shunt_uohms;
} else if (!of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val)) {
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 0a1c9626aa9..08ba4972da9 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -282,7 +282,6 @@ static int msm_iommu_remove(struct platform_device *pdev)
clk_put(drv->pclk);
memset(drv, 0, sizeof(*drv));
kfree(drv);
- platform_set_drvdata(pdev, NULL);
}
return 0;
}
@@ -366,7 +365,6 @@ static int msm_iommu_ctx_remove(struct platform_device *pdev)
if (drv) {
memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
kfree(drv);
- platform_set_drvdata(pdev, NULL);
}
return 0;
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 0ba3766240d..bcd78a72063 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1008,8 +1008,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct resource *res;
struct omap_iommu *obj = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
iopgtable_clear_entry_all(obj);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5ef78efc27f..2acc43fe022 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,7 @@
#
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
- dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0df3ec085eb..29569768ffb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
#define MIGRATION_COUNT_WINDOW 10
/*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
*/
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
/*
* FIXME: the cache is read/write for the time being.
@@ -101,6 +103,8 @@ struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
+ struct dm_cache_metadata *cmd;
+
/*
* Metadata is written to this device.
*/
@@ -117,11 +121,6 @@ struct cache {
struct dm_dev *cache_dev;
/*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- /*
* Size of the origin device in _complete_ blocks and native sectors.
*/
dm_oblock_t origin_blocks;
@@ -138,8 +137,6 @@ struct cache {
uint32_t sectors_per_block;
int sectors_per_block_shift;
- struct dm_cache_metadata *cmd;
-
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@ struct cache {
struct list_head completed_migrations;
struct list_head need_commit_migrations;
sector_t migration_threshold;
- atomic_t nr_migrations;
wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
/*
* cache_size entries, dirty if set
@@ -160,9 +157,16 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
@@ -187,14 +191,12 @@ struct cache {
bool loaded_mappings:1;
bool loaded_discards:1;
- struct cache_stats stats;
-
/*
- * Rather than reconstructing the table line for the status we just
- * save it and regurgitate.
+ * Cache features such as write-through.
*/
- unsigned nr_ctr_args;
- const char **ctr_args;
+ struct cache_features features;
+
+ struct cache_stats stats;
};
struct per_bio_data {
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
- unsigned long tmp;
+ unsigned long block_size;
if (!at_least_one_arg(as, error))
return -EINVAL;
- if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
- tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
- tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+ block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+ block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
*error = "Invalid data block size";
return -EINVAL;
}
- if (tmp > ca->cache_sectors) {
+ if (block_size > ca->cache_sectors) {
*error = "Data block size is larger than the cache device";
return -EINVAL;
}
- ca->block_size = tmp;
+ ca->block_size = block_size;
return 0;
}
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct cache *cache = ti->private;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * cache's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < cache->sectors_per_block ||
+ do_div(io_opt_sectors, cache->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ }
set_discard_limits(cache, limits);
}
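
The new io_hints logic leans on do_div() semantics: it divides a u64 in place and returns the remainder, so a zero result means the stacked io_opt is already a whole multiple of the cache block size and should be left untouched. Reduced to a predicate (names are illustrative):

	/* True when io_opt is an exact multiple of the cache block size,
	 * i.e. the stacked limits are compatible and need no override. */
	static bool io_opt_is_block_multiple(uint64_t io_opt_sectors,
					     uint32_t sectors_per_block)
	{
		return do_div(io_opt_sectors, sectors_per_block) == 0;
	}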
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6d2d41ae9e3..0fce0bc1a95 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io",
- WQ_NON_REENTRANT|
- WQ_MEM_RECLAIM,
- 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_NON_REENTRANT|
- WQ_CPU_INTENSIVE|
- WQ_MEM_RECLAIM,
- 1);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
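
This flag removal (repeated below for dm-kcopyd, dm-raid1 and dm core) follows from WQ_NON_REENTRANT having become a no-op: since the v3.7 workqueue rework all workqueues are non-reentrant by default, so only the flags that still change behaviour are passed. An equivalent sketch:

	/* Reclaim-safe queue limited to one in-flight work item;
	 * non-reentrancy no longer needs an explicit flag. */
	struct workqueue_struct *wq = alloc_workqueue("kcryptd_io",
						      WQ_MEM_RECLAIM, 1);
	if (!wq)
		return -ENOMEM;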
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f1b758675ec..afe08146f73 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -877,7 +877,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
- invalid_str(new_data, (void *) param + param_size) ||
+ invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
DMWARN("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
@@ -1262,44 +1262,37 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
r = dm_table_create(&t, get_mode(param), param->target_count, md);
if (r)
- goto out;
+ goto err;
+ /* Protect md->type and md->queue against concurrent table loads. */
+ dm_lock_md_type(md);
r = populate_table(t, param, param_size);
- if (r) {
- dm_table_destroy(t);
- goto out;
- }
+ if (r)
+ goto err_unlock_md_type;
immutable_target_type = dm_get_immutable_target_type(md);
if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t))) {
DMWARN("can't replace immutable target type %s",
immutable_target_type->name);
- dm_table_destroy(t);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
- /* Protect md->type and md->queue against concurrent table loads. */
- dm_lock_md_type(md);
if (dm_get_md_type(md) == DM_TYPE_NONE)
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md);
if (r) {
DMWARN("unable to set up device queue for new table.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
- goto out;
+ goto err_unlock_md_type;
}
dm_unlock_md_type(md);
@@ -1309,9 +1302,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table.");
up_write(&_hash_lock);
- dm_table_destroy(t);
r = -ENXIO;
- goto out;
+ goto err_destroy_table;
}
if (hc->new_map)
@@ -1322,7 +1314,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
param->flags |= DM_INACTIVE_PRESENT_FLAG;
__dev_status(md, param);
-out:
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
@@ -1330,6 +1321,15 @@ out:
dm_put(md);
+ return 0;
+
+err_unlock_md_type:
+ dm_unlock_md_type(md);
+err_destroy_table:
+ dm_table_destroy(t);
+err:
+ dm_put(md);
+
return r;
}
@@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
- return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
/*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages. Messages prefixed with '@'
+ * are processed by the DM core. All others are delivered to the target.
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen)
{
- return 2;
+ int r;
+
+ if (**argv != '@')
+ return 2; /* no '@' prefix, deliver to target */
+
+ r = dm_stats_message(md, argc, argv, result, maxlen);
+ if (r < 2)
+ return r;
+
+ DMERR("Unsupported message sent to DM core: %s", argv[0]);
+ return -EINVAL;
}
/*
@@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (r == 1) {
param->flags |= DM_DATA_OUT_FLAG;
- if (buffer_test_overflow(result, maxlen))
+ if (dm_message_test_buffer_overflow(result, maxlen))
param->flags |= DM_BUFFER_FULL_FLAG;
else
param->data_size = param->data_start + strlen(result) + 1;
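
The table_load() rework above converges on the classic goto ladder: every failure jumps to a label that unwinds exactly what has been set up so far, in reverse order, so no path can leak the table or leave the md type lock held. A generic sketch (acquire_a()/acquire_b() and their release counterparts are hypothetical setup steps):

	static int setup_sketch(void)
	{
		int r;

		r = acquire_a();
		if (r)
			goto err;
		r = acquire_b();	/* depends on a */
		if (r)
			goto err_release_a;
		return 0;

	err_release_a:
		release_a();		/* unwind in reverse order */
	err:
		return r;
	}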
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d581fe5d2fa..3a7cade5e27 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
- kc->kcopyd_wq = alloc_workqueue("kcopyd",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
if (!kc->kcopyd_wq)
goto bad_workqueue;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 699b5be68d3..9584443c561 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
- ms->kmirrord_wq = alloc_workqueue("kmirrord",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
r = -ENOMEM;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
new file mode 100644
index 00000000000..8ae31e8d3d6
--- /dev/null
+++ b/drivers/md/dm-stats.c
@@ -0,0 +1,969 @@
+#include <linux/errno.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/threads.h>
+#include <linux/preempt.h>
+#include <linux/irqflags.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+
+#include "dm.h"
+#include "dm-stats.h"
+
+#define DM_MSG_PREFIX "stats"
+
+static int dm_stat_need_rcu_barrier;
+
+/*
+ * Using 64-bit values to avoid overflow (which is a
+ * problem that block/genhd.c's IO accounting has).
+ */
+struct dm_stat_percpu {
+ unsigned long long sectors[2];
+ unsigned long long ios[2];
+ unsigned long long merges[2];
+ unsigned long long ticks[2];
+ unsigned long long io_ticks[2];
+ unsigned long long io_ticks_total;
+ unsigned long long time_in_queue;
+};
+
+struct dm_stat_shared {
+ atomic_t in_flight[2];
+ unsigned long stamp;
+ struct dm_stat_percpu tmp;
+};
+
+struct dm_stat {
+ struct list_head list_entry;
+ int id;
+ size_t n_entries;
+ sector_t start;
+ sector_t end;
+ sector_t step;
+ const char *program_id;
+ const char *aux_data;
+ struct rcu_head rcu_head;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *stat_percpu[NR_CPUS];
+ struct dm_stat_shared stat_shared[0];
+};
+
+struct dm_stats_last_position {
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+/*
+ * A typo on the command line could possibly make the kernel run out of memory
+ * and crash. To prevent the crash we account all used memory. We fail if we
+ * exhaust 1/4 of all memory or 1/2 of vmalloc space.
+ */
+#define DM_STATS_MEMORY_FACTOR 4
+#define DM_STATS_VMALLOC_FACTOR 2
+
+static DEFINE_SPINLOCK(shared_memory_lock);
+
+static unsigned long shared_memory_amount;
+
+static bool __check_shared_memory(size_t alloc_size)
+{
+ size_t a;
+
+ a = shared_memory_amount + alloc_size;
+ if (a < shared_memory_amount)
+ return false;
+ if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+ return false;
+#ifdef CONFIG_MMU
+ if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+ return false;
+#endif
+ return true;
+}
+
+static bool check_shared_memory(size_t alloc_size)
+{
+ bool ret;
+
+ spin_lock_irq(&shared_memory_lock);
+
+ ret = __check_shared_memory(alloc_size);
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return ret;
+}
+
+static bool claim_shared_memory(size_t alloc_size)
+{
+ spin_lock_irq(&shared_memory_lock);
+
+ if (!__check_shared_memory(alloc_size)) {
+ spin_unlock_irq(&shared_memory_lock);
+ return false;
+ }
+
+ shared_memory_amount += alloc_size;
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return true;
+}
+
+static void free_shared_memory(size_t alloc_size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&shared_memory_lock, flags);
+
+ if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+ DMCRIT("Memory usage accounting bug.");
+ return;
+ }
+
+ shared_memory_amount -= alloc_size;
+
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+}
+
+static void *dm_kvzalloc(size_t alloc_size, int node)
+{
+ void *p;
+
+ if (!claim_shared_memory(alloc_size))
+ return NULL;
+
+ if (alloc_size <= KMALLOC_MAX_SIZE) {
+ p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
+ if (p)
+ return p;
+ }
+ p = vzalloc_node(alloc_size, node);
+ if (p)
+ return p;
+
+ free_shared_memory(alloc_size);
+
+ return NULL;
+}
+
+static void dm_kvfree(void *ptr, size_t alloc_size)
+{
+ if (!ptr)
+ return;
+
+ free_shared_memory(alloc_size);
+
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+static void dm_stat_free(struct rcu_head *head)
+{
+ int cpu;
+ struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
+ kfree(s->program_id);
+ kfree(s->aux_data);
+ for_each_possible_cpu(cpu)
+ dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+ dm_kvfree(s, s->shared_alloc_size);
+}
+
+static int dm_stat_in_flight(struct dm_stat_shared *shared)
+{
+ return atomic_read(&shared->in_flight[READ]) +
+ atomic_read(&shared->in_flight[WRITE]);
+}
+
+void dm_stats_init(struct dm_stats *stats)
+{
+ int cpu;
+ struct dm_stats_last_position *last;
+
+ mutex_init(&stats->mutex);
+ INIT_LIST_HEAD(&stats->list);
+ stats->last = alloc_percpu(struct dm_stats_last_position);
+ for_each_possible_cpu(cpu) {
+ last = per_cpu_ptr(stats->last, cpu);
+ last->last_sector = (sector_t)ULLONG_MAX;
+ last->last_rw = UINT_MAX;
+ }
+}
+
+void dm_stats_cleanup(struct dm_stats *stats)
+{
+ size_t ni;
+ struct dm_stat *s;
+ struct dm_stat_shared *shared;
+
+ while (!list_empty(&stats->list)) {
+ s = container_of(stats->list.next, struct dm_stat, list_entry);
+ list_del(&s->list_entry);
+ for (ni = 0; ni < s->n_entries; ni++) {
+ shared = &s->stat_shared[ni];
+ if (WARN_ON(dm_stat_in_flight(shared))) {
+ DMCRIT("leaked in-flight counter at index %lu "
+ "(start %llu, end %llu, step %llu): reads %d, writes %d",
+ (unsigned long)ni,
+ (unsigned long long)s->start,
+ (unsigned long long)s->end,
+ (unsigned long long)s->step,
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+ free_percpu(stats->last);
+}
+
+static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ sector_t step, const char *program_id, const char *aux_data,
+ void (*suspend_callback)(struct mapped_device *),
+ void (*resume_callback)(struct mapped_device *),
+ struct mapped_device *md)
+{
+ struct list_head *l;
+ struct dm_stat *s, *tmp_s;
+ sector_t n_entries;
+ size_t ni;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *p;
+ int cpu;
+ int ret_id;
+ int r;
+
+ if (end < start || !step)
+ return -EINVAL;
+
+ n_entries = end - start;
+ if (dm_sector_div64(n_entries, step))
+ n_entries++;
+
+ if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
+ return -EOVERFLOW;
+
+ shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+ if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
+ return -EOVERFLOW;
+
+ percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
+ if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
+ return -EOVERFLOW;
+
+ if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+ return -ENOMEM;
+
+ s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+ if (!s)
+ return -ENOMEM;
+
+ s->n_entries = n_entries;
+ s->start = start;
+ s->end = end;
+ s->step = step;
+ s->shared_alloc_size = shared_alloc_size;
+ s->percpu_alloc_size = percpu_alloc_size;
+
+ s->program_id = kstrdup(program_id, GFP_KERNEL);
+ if (!s->program_id) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!s->aux_data) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
+ if (!p) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->stat_percpu[cpu] = p;
+ }
+
+ /*
+ * Suspend/resume to make sure there is no i/o in flight,
+ * so that newly created statistics will be exact.
+ *
+ * (note: we couldn't suspend earlier because we must not
+ * allocate memory while suspended)
+ */
+ suspend_callback(md);
+
+ mutex_lock(&stats->mutex);
+ s->id = 0;
+ list_for_each(l, &stats->list) {
+ tmp_s = container_of(l, struct dm_stat, list_entry);
+ if (WARN_ON(tmp_s->id < s->id)) {
+ r = -EINVAL;
+ goto out_unlock_resume;
+ }
+ if (tmp_s->id > s->id)
+ break;
+ if (unlikely(s->id == INT_MAX)) {
+ r = -ENFILE;
+ goto out_unlock_resume;
+ }
+ s->id++;
+ }
+ ret_id = s->id;
+ list_add_tail_rcu(&s->list_entry, l);
+ mutex_unlock(&stats->mutex);
+
+ resume_callback(md);
+
+ return ret_id;
+
+out_unlock_resume:
+ mutex_unlock(&stats->mutex);
+ resume_callback(md);
+out:
+ dm_stat_free(&s->rcu_head);
+ return r;
+}
+
+static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (s->id > id)
+ break;
+ if (s->id == id)
+ return s;
+ }
+
+ return NULL;
+}
+
+static int dm_stats_delete(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+ int cpu;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ list_del_rcu(&s->list_entry);
+ mutex_unlock(&stats->mutex);
+
+ /*
+ * vfree can't be called from RCU callback
+ */
+ for_each_possible_cpu(cpu)
+		if (is_vmalloc_addr(s->stat_percpu[cpu]))
+ goto do_sync_free;
+ if (is_vmalloc_addr(s)) {
+do_sync_free:
+ synchronize_rcu_expedited();
+ dm_stat_free(&s->rcu_head);
+ } else {
+ ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ call_rcu(&s->rcu_head, dm_stat_free);
+ }
+ return 0;
+}
+
+static int dm_stats_list(struct dm_stats *stats, const char *program,
+ char *result, unsigned maxlen)
+{
+ struct dm_stat *s;
+ sector_t len;
+ unsigned sz = 0;
+
+ /*
+ * Output format:
+ * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+ */
+
+ mutex_lock(&stats->mutex);
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (!program || !strcmp(program, s->program_id)) {
+ len = s->end - s->start;
+ DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+ (unsigned long long)s->start,
+ (unsigned long long)len,
+ (unsigned long long)s->step,
+ s->program_id,
+ s->aux_data);
+ }
+ }
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+{
+ /*
+ * This is racy, but so is part_round_stats_single.
+ */
+ unsigned long now = jiffies;
+ unsigned in_flight_read;
+ unsigned in_flight_write;
+ unsigned long difference = now - shared->stamp;
+
+ if (!difference)
+ return;
+ in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+ in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+ if (in_flight_read)
+ p->io_ticks[READ] += difference;
+ if (in_flight_write)
+ p->io_ticks[WRITE] += difference;
+ if (in_flight_read + in_flight_write) {
+ p->io_ticks_total += difference;
+ p->time_in_queue += (in_flight_read + in_flight_write) * difference;
+ }
+ shared->stamp = now;
+}
+
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+ unsigned long bi_rw, sector_t len, bool merged,
+ bool end, unsigned long duration)
+{
+ unsigned long idx = bi_rw & REQ_WRITE;
+ struct dm_stat_shared *shared = &s->stat_shared[entry];
+ struct dm_stat_percpu *p;
+
+ /*
+ * For strict correctness we should use local_irq_disable/enable
+ * instead of preempt_disable/enable.
+ *
+ * This is racy if the driver finishes bios from non-interrupt
+	 * context as well as from interrupt context or from more than one
+	 * interrupt.
+ *
+ * However, the race only results in not counting some events,
+ * so it is acceptable.
+ *
+ * part_stat_lock()/part_stat_unlock() have this race too.
+ */
+ preempt_disable();
+ p = &s->stat_percpu[smp_processor_id()][entry];
+
+ if (!end) {
+ dm_stat_round(shared, p);
+ atomic_inc(&shared->in_flight[idx]);
+ } else {
+ dm_stat_round(shared, p);
+ atomic_dec(&shared->in_flight[idx]);
+ p->sectors[idx] += len;
+ p->ios[idx] += 1;
+ p->merges[idx] += merged;
+ p->ticks[idx] += duration;
+ }
+
+ preempt_enable();
+}
+
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+ sector_t bi_sector, sector_t end_sector,
+ bool end, unsigned long duration,
+ struct dm_stats_aux *stats_aux)
+{
+ sector_t rel_sector, offset, todo, fragment_len;
+ size_t entry;
+
+ if (end_sector <= s->start || bi_sector >= s->end)
+ return;
+ if (unlikely(bi_sector < s->start)) {
+ rel_sector = 0;
+ todo = end_sector - s->start;
+ } else {
+ rel_sector = bi_sector - s->start;
+ todo = end_sector - bi_sector;
+ }
+ if (unlikely(end_sector > s->end))
+ todo -= (end_sector - s->end);
+
+ offset = dm_sector_div64(rel_sector, s->step);
+ entry = rel_sector;
+ do {
+ if (WARN_ON_ONCE(entry >= s->n_entries)) {
+ DMCRIT("Invalid area access in region id %d", s->id);
+ return;
+ }
+ fragment_len = todo;
+ if (fragment_len > s->step - offset)
+ fragment_len = s->step - offset;
+ dm_stat_for_entry(s, entry, bi_rw, fragment_len,
+ stats_aux->merged, end, duration);
+ todo -= fragment_len;
+ entry++;
+ offset = 0;
+ } while (unlikely(todo != 0));
+}
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *stats_aux)
+{
+ struct dm_stat *s;
+ sector_t end_sector;
+ struct dm_stats_last_position *last;
+
+ if (unlikely(!bi_sectors))
+ return;
+
+ end_sector = bi_sector + bi_sectors;
+
+ if (!end) {
+ /*
+ * A race condition can at worst result in the merged flag being
+ * misrepresented, so we don't have to disable preemption here.
+ */
+ last = __this_cpu_ptr(stats->last);
+ stats_aux->merged =
+			(bi_sector == ACCESS_ONCE(last->last_sector) &&
+			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
+			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
+ ACCESS_ONCE(last->last_sector) = end_sector;
+ ACCESS_ONCE(last->last_rw) = bi_rw;
+ }
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(s, &stats->list, list_entry)
+ __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+
+ rcu_read_unlock();
+}
+
+static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
+ struct dm_stat *s, size_t x)
+{
+ int cpu;
+ struct dm_stat_percpu *p;
+
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ dm_stat_round(shared, p);
+ local_irq_enable();
+
+ memset(&shared->tmp, 0, sizeof(shared->tmp));
+ for_each_possible_cpu(cpu) {
+ p = &s->stat_percpu[cpu][x];
+ shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ }
+}
+
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ bool init_tmp_percpu_totals)
+{
+ size_t x;
+ struct dm_stat_shared *shared;
+ struct dm_stat_percpu *p;
+
+ for (x = idx_start; x < idx_end; x++) {
+ shared = &s->stat_shared[x];
+ if (init_tmp_percpu_totals)
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ p->sectors[READ] -= shared->tmp.sectors[READ];
+ p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
+ p->ios[READ] -= shared->tmp.ios[READ];
+ p->ios[WRITE] -= shared->tmp.ios[WRITE];
+ p->merges[READ] -= shared->tmp.merges[READ];
+ p->merges[WRITE] -= shared->tmp.merges[WRITE];
+ p->ticks[READ] -= shared->tmp.ticks[READ];
+ p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
+ p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
+ p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
+ p->io_ticks_total -= shared->tmp.io_ticks_total;
+ p->time_in_queue -= shared->tmp.time_in_queue;
+ local_irq_enable();
+ }
+}
+
+static int dm_stats_clear(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ __dm_stat_clear(s, 0, s->n_entries, true);
+
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+/*
+ * This is like jiffies_to_msecs(), but works for 64-bit values.
+ */
+static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+{
+ unsigned long long result = 0;
+ unsigned mult;
+
+ if (j)
+ result = jiffies_to_msecs(j & 0x3fffff);
+ if (j >= 1 << 22) {
+ mult = jiffies_to_msecs(1 << 22);
+ result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
+ }
+ if (j >= 1ULL << 44)
+ result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
+
+ return result;
+}
+
+static int dm_stats_print(struct dm_stats *stats, int id,
+ size_t idx_start, size_t idx_len,
+ bool clear, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct dm_stat *s;
+ size_t x;
+ sector_t start, end, step;
+ size_t idx_end;
+ struct dm_stat_shared *shared;
+
+ /*
+ * Output format:
+ * <start_sector>+<length> counters
+ */
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ idx_end = idx_start + idx_len;
+ if (idx_end < idx_start ||
+ idx_end > s->n_entries)
+ idx_end = s->n_entries;
+
+ if (idx_start > idx_end)
+ idx_start = idx_end;
+
+ step = s->step;
+ start = s->start + (step * idx_start);
+
+ for (x = idx_start; x < idx_end; x++, start = end) {
+ shared = &s->stat_shared[x];
+ end = start + step;
+ if (unlikely(end > s->end))
+ end = s->end;
+
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+
+ DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+ (unsigned long long)start,
+ (unsigned long long)step,
+ shared->tmp.ios[READ],
+ shared->tmp.merges[READ],
+ shared->tmp.sectors[READ],
+ dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+ shared->tmp.ios[WRITE],
+ shared->tmp.merges[WRITE],
+ shared->tmp.sectors[WRITE],
+ dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+ dm_stat_in_flight(shared),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
+ dm_jiffies_to_msec64(shared->tmp.time_in_queue),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
+ }
+
+ if (clear)
+ __dm_stat_clear(s, idx_start, idx_end, false);
+
+buffer_overflow:
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
+{
+ struct dm_stat *s;
+ const char *new_aux_data;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ new_aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!new_aux_data) {
+ mutex_unlock(&stats->mutex);
+ return -ENOMEM;
+ }
+
+ kfree(s->aux_data);
+ s->aux_data = new_aux_data;
+
+ mutex_unlock(&stats->mutex);
+
+ return 0;
+}
+
+static int message_stats_create(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long long start, end, len, step;
+ unsigned divisor;
+ const char *program_id, *aux_data;
+
+ /*
+ * Input format:
+ * <range> <step> [<program_id> [<aux_data>]]
+ */
+
+ if (argc < 3 || argc > 5)
+ return -EINVAL;
+
+ if (!strcmp(argv[1], "-")) {
+ start = 0;
+ len = dm_get_size(md);
+ if (!len)
+ len = 1;
+ } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+ start != (sector_t)start || len != (sector_t)len)
+ return -EINVAL;
+
+ end = start + len;
+ if (start >= end)
+ return -EINVAL;
+
+ if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+ step = end - start;
+ if (do_div(step, divisor))
+ step++;
+ if (!step)
+ step = 1;
+ } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+ step != (sector_t)step || !step)
+ return -EINVAL;
+
+ program_id = "-";
+ aux_data = "-";
+
+ if (argc > 3)
+ program_id = argv[3];
+
+ if (argc > 4)
+ aux_data = argv[4];
+
+ /*
+ * If a buffer overflow happens after we created the region,
+ * it's too late (userspace would retry with a larger
+ * buffer, but the region id that caused the overflow is already
+ * leaked). So we must detect buffer overflow in advance.
+ */
+ snprintf(result, maxlen, "%d", INT_MAX);
+ if (dm_message_test_buffer_overflow(result, maxlen))
+ return 1;
+
+ id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+ dm_internal_suspend, dm_internal_resume, md);
+ if (id < 0)
+ return id;
+
+ snprintf(result, maxlen, "%d", id);
+
+ return 1;
+}
+
+static int message_stats_delete(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_delete(dm_get_stats(md), id);
+}
+
+static int message_stats_clear(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_clear(dm_get_stats(md), id);
+}
+
+static int message_stats_list(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+ const char *program = NULL;
+
+ if (argc < 1 || argc > 2)
+ return -EINVAL;
+
+ if (argc > 1) {
+ program = kstrdup(argv[1], GFP_KERNEL);
+ if (!program)
+ return -ENOMEM;
+ }
+
+ r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
+
+ kfree(program);
+
+ return r;
+}
+
+static int message_stats_print(struct mapped_device *md,
+ unsigned argc, char **argv, bool clear,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long idx_start = 0, idx_len = ULONG_MAX;
+
+ if (argc != 2 && argc != 4)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ if (argc > 3) {
+ if (strcmp(argv[2], "-") &&
+ sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
+ return -EINVAL;
+ if (strcmp(argv[3], "-") &&
+ sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
+ return -EINVAL;
+ }
+
+ return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
+ result, maxlen);
+}
+
+static int message_stats_set_aux(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 3)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+}
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+
+ if (dm_request_based(md)) {
+ DMWARN("Statistics are only supported for bio-based devices");
+ return -EOPNOTSUPP;
+ }
+
+ /* All messages here must start with '@' */
+ if (!strcasecmp(argv[0], "@stats_create"))
+ r = message_stats_create(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_delete"))
+ r = message_stats_delete(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_clear"))
+ r = message_stats_clear(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_list"))
+ r = message_stats_list(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print"))
+ r = message_stats_print(md, argc, argv, false, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print_clear"))
+ r = message_stats_print(md, argc, argv, true, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_set_aux"))
+ r = message_stats_set_aux(md, argc, argv);
+ else
+ return 2; /* this wasn't a stats message */
+
+ if (r == -EINVAL)
+ DMWARN("Invalid parameters for message %s", argv[0]);
+
+ return r;
+}
+
+int __init dm_statistics_init(void)
+{
+ dm_stat_need_rcu_barrier = 0;
+ return 0;
+}
+
+void dm_statistics_exit(void)
+{
+ if (dm_stat_need_rcu_barrier)
+ rcu_barrier();
+ if (WARN_ON(shared_memory_amount))
+ DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
+}
+
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
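
dm_kvzalloc() above is a kmalloc-then-vmalloc fallback with memory accounting wrapped around it. Stripped of the accounting, the core pattern looks like this (a sketch, not the full function):

	static void *kvzalloc_sketch(size_t size, int node)
	{
		void *p = NULL;

		if (size <= KMALLOC_MAX_SIZE)	/* try the slab first, quietly */
			p = kzalloc_node(size, GFP_KERNEL | __GFP_NORETRY |
					 __GFP_NOMEMALLOC | __GFP_NOWARN, node);
		if (!p)
			p = vzalloc_node(size, node);	/* fall back to vmalloc */
		return p;	/* pair with an is_vmalloc_addr() check on free */
	}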
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
new file mode 100644
index 00000000000..e7c4984bf23
--- /dev/null
+++ b/drivers/md/dm-stats.h
@@ -0,0 +1,40 @@
+#ifndef DM_STATS_H
+#define DM_STATS_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+int dm_statistics_init(void);
+void dm_statistics_exit(void);
+
+struct dm_stats {
+ struct mutex mutex;
+ struct list_head list; /* list of struct dm_stat */
+ struct dm_stats_last_position __percpu *last;
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+struct dm_stats_aux {
+ bool merged;
+};
+
+void dm_stats_init(struct dm_stats *st);
+void dm_stats_cleanup(struct dm_stats *st);
+
+struct mapped_device;
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen);
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *aux);
+
+static inline bool dm_stats_used(struct dm_stats *st)
+{
+ return !list_empty(&st->list);
+}
+
+#endif
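
A reader of these statistics totals the per-CPU slots the way __dm_stat_init_temporary_percpu_totals() does in dm-stats.c: walk every possible CPU and sum its counter. A minimal sketch over one counter:

	/* Sum one counter across all CPUs; torn reads are tolerated, as in
	 * the real code, because mid-flight exactness is not required. */
	static unsigned long long total_ios(struct dm_stat *s, size_t entry, int rw)
	{
		unsigned long long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += ACCESS_ONCE(s->stat_percpu[cpu][entry].ios[rw]);
		return sum;
	}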
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d907ca6227c..73c1712dad9 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,6 +4,7 @@
* This file is released under the GPL.
*/
+#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/module.h>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f221812b7db..8f8783533ac 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,14 +860,17 @@ EXPORT_SYMBOL(dm_consume_args);
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
- unsigned bio_based = 0, request_based = 0;
+ unsigned bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices;
+ unsigned live_md_type;
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
- if (dm_target_request_based(tgt))
+ if (dm_target_hybrid(tgt))
+ hybrid = 1;
+ else if (dm_target_request_based(tgt))
request_based = 1;
else
bio_based = 1;
@@ -879,6 +882,19 @@ static int dm_table_set_type(struct dm_table *t)
}
}
+ if (hybrid && !bio_based && !request_based) {
+ /*
+ * The targets can work either way.
+ * Determine the type from the live device.
+ * Default to bio-based if device is new.
+ */
+ live_md_type = dm_get_md_type(t->md);
+ if (live_md_type == DM_TYPE_REQUEST_BASED)
+ request_based = 1;
+ else
+ bio_based = 1;
+ }
+
if (bio_based) {
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 37ba5db71cd..242e3cec397 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,19 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO;
}
+static int io_err_map_rq(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ return -EIO;
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
+ .map_rq = io_err_map_rq,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 88f2f802d52..ed063427d67 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -887,7 +887,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR_LIMIT("commit failed: error = %d", r);
+ DMERR_LIMIT("%s: commit failed: error = %d",
+ dm_device_name(pool->pool_md), r);
return r;
}
@@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
unsigned long flags;
struct pool *pool = tc->pool;
+ /*
+ * Once no_free_space is set we must not allow allocation to succeed.
+ * Otherwise it is difficult to explain, debug, test and support.
+ */
+ if (pool->no_free_space)
+ return -ENOSPC;
+
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
return r;
@@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
if (!free_blocks) {
- if (pool->no_free_space)
- return -ENOSPC;
- else {
- /*
- * Try to commit to see if that will free up some
- * more space.
- */
- (void) commit_or_fallback(pool);
+ /*
+ * Try to commit to see if that will free up some
+ * more space.
+ */
+ (void) commit_or_fallback(pool);
- r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+ return r;
- /*
- * If we still have no space we set a flag to avoid
- * doing all this checking and return -ENOSPC.
- */
- if (!free_blocks) {
- DMWARN("%s: no free space available.",
- dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
- pool->no_free_space = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
- return -ENOSPC;
- }
+ /*
+ * If we still have no space we set a flag to avoid
+ * doing all this checking and return -ENOSPC. This
+ * flag serves as a latch that disallows allocations from
+ * this pool until the admin takes action (e.g. resize or
+ * table reload).
+ */
+ if (!free_blocks) {
+ DMWARN("%s: no free space available.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->no_free_space = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return -ENOSPC;
}
}
@@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
r = alloc_data_block(tc, &data_block);
switch (r) {
@@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(tc->pool, cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- cell_error(tc->pool, cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
switch (mode) {
case PM_FAIL:
- DMERR("switching pool to failure mode");
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_READ_ONLY:
- DMERR("switching pool to read-only mode");
+ DMERR("%s: switching pool to read-only mode",
+ dm_device_name(pool->pool_md));
r = dm_pool_abort_metadata(pool->pmd);
if (r) {
- DMERR("aborting transaction failed");
+ DMERR("%s: aborting transaction failed",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_FAIL);
} else {
dm_pool_metadata_read_only(pool->pmd);
@@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve data device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (data_size < sb_data_size) {
- DMERR("pool target (%llu blocks) too small: expected %llu",
+ DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
(unsigned long long)data_size, sb_data_size);
return -EINVAL;
} else if (data_size > sb_data_size) {
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
- DMERR("failed to resize data device");
+ DMERR("%s: failed to resize data device",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_READ_ONLY);
return r;
}
@@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve metadata device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (metadata_dev_size < sb_metadata_dev_size) {
- DMERR("metadata device (%llu blocks) too small: expected %llu",
+ DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
metadata_dev_size, sb_metadata_dev_size);
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
- DMERR("failed to resize metadata device");
+ DMERR("%s: failed to resize metadata device",
+ dm_device_name(pool->pool_md));
return r;
}
@@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type,
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
- DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
if (r) {
- DMERR("dm_pool_get_free_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
if (r) {
- DMERR("dm_pool_get_data_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_data_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
if (r) {
- DMERR("dm_pool_get_metadata_snap returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_snap returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
@@ -2648,9 +2672,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * pool's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < pool->sectors_per_block ||
+ do_div(io_opt_sectors, pool->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ }
/*
* pt->adjusted_pf is a staging area for the actual features to use.
@@ -2669,7 +2701,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2956,7 +2988,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
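
The alloc_data_block() change earlier in this file turns no_free_space into a latch: once set under pool->lock, every later allocation fails fast with -ENOSPC until the administrator resizes the pool or reloads the table. The shape of the check, reduced to a sketch (the metadata query is omitted):

	static int alloc_block_sketch(struct pool *pool, dm_block_t *result)
	{
		unsigned long flags;
		dm_block_t free_blocks = 0;

		if (pool->no_free_space)
			return -ENOSPC;		/* latched: fail fast */

		/* ...query free_blocks from pool metadata (omitted)... */
		if (!free_blocks) {
			spin_lock_irqsave(&pool->lock, flags);
			pool->no_free_space = 1;	/* latch until admin action */
			spin_unlock_irqrestore(&pool->lock, flags);
			return -ENOSPC;
		}
		return 0;	/* *result would be filled here */
	}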
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9e39d2b64bf..6a5e9ed2fcc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,6 +60,7 @@ struct dm_io {
struct bio *bio;
unsigned long start_time;
spinlock_t endio_lock;
+ struct dm_stats_aux stats_aux;
};
/*
@@ -198,6 +199,8 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+
+ struct dm_stats stats;
};
/*
@@ -269,6 +272,7 @@ static int (*_inits[])(void) __initdata = {
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
+ dm_statistics_init,
};
static void (*_exits[])(void) = {
@@ -279,6 +283,7 @@ static void (*_exits[])(void) = {
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
+ dm_statistics_exit,
};
static int __init dm_init(void)
@@ -384,6 +389,16 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+sector_t dm_get_size(struct mapped_device *md)
+{
+ return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+ return &md->stats;
+}
+
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -466,8 +481,9 @@ static int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ struct bio *bio = io->bio;
int cpu;
- int rw = bio_data_dir(io->bio);
+ int rw = bio_data_dir(bio);
io->start_time = jiffies;
@@ -476,6 +492,10 @@ static void start_io_acct(struct dm_io *io)
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
+
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
@@ -491,6 +511,10 @@ static void end_io_acct(struct dm_io *io)
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), true, duration, &io->stats_aux);
+
/*
* After this is decremented the bio must not be touched if it is
* a flush.
@@ -1519,7 +1543,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
return;
}
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
{
return blk_queue_stackable(md->queue);
}
@@ -1946,8 +1970,7 @@ static struct mapped_device *alloc_dev(int minor)
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
- md->wq = alloc_workqueue("kdmflush",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
@@ -1959,6 +1982,8 @@ static struct mapped_device *alloc_dev(int minor)
md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_rw = WRITE_FLUSH;
+ dm_stats_init(&md->stats);
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -2010,6 +2035,7 @@ static void free_dev(struct mapped_device *md)
put_disk(md->disk);
blk_cleanup_queue(md->queue);
+ dm_stats_cleanup(&md->stats);
module_put(THIS_MODULE);
kfree(md);
}
@@ -2151,7 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != get_capacity(md->disk))
+ if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
__set_size(md, size);
@@ -2236,11 +2262,13 @@ void dm_unlock_md_type(struct mapped_device *md)
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
unsigned dm_get_md_type(struct mapped_device *md)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
return md->type;
}
@@ -2695,6 +2723,38 @@ out:
return r;
}
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+ mutex_lock(&md->suspend_lock);
+ if (dm_suspended_md(md))
+ return;
+
+ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+ synchronize_srcu(&md->io_barrier);
+ flush_workqueue(md->wq);
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+ if (dm_suspended_md(md))
+ goto done;
+
+ dm_queue_flush(md);
+
+done:
+ mutex_unlock(&md->suspend_lock);
+}
+
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
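
Note the asymmetric locking contract in the dm_internal_suspend()/dm_internal_resume() pair added above: suspend returns with md->suspend_lock held (even on the early return), and resume drops it, so the two must always be called strictly in pairs. A minimal user-space sketch of that contract, assuming POSIX threads; all names here are illustrative, not kernel API:

#include <pthread.h>
#include <stdbool.h>

struct model_md {
	pthread_mutex_t suspend_lock;
	bool io_blocked;
};

/* returns with suspend_lock held, mirroring dm_internal_suspend() */
static void model_internal_suspend(struct model_md *md)
{
	pthread_mutex_lock(&md->suspend_lock);
	if (md->io_blocked)
		return;			/* already suspended; the lock stays held */
	md->io_blocked = true;		/* stand-in for quiescing all I/O */
}

/* drops suspend_lock, mirroring dm_internal_resume() */
static void model_internal_resume(struct model_md *md)
{
	md->io_blocked = false;		/* stand-in for requeueing deferred I/O */
	pthread_mutex_unlock(&md->suspend_lock);
}

int main(void)
{
	struct model_md md = { .io_blocked = false };

	pthread_mutex_init(&md.suspend_lock, NULL);
	model_internal_suspend(&md);
	/* work with I/O quiesced */
	model_internal_resume(&md);
	return 0;
}
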
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da1bd0..5e604cc7b4a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,6 +16,8 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include "dm-stats.h"
+
/*
* Suspend feature flags
*/
@@ -89,10 +91,21 @@ int dm_setup_md_queue(struct mapped_device *md);
#define dm_target_is_valid(t) ((t)->table)
/*
+ * To check whether the target type is bio-based or not (request-based).
+ */
+#define dm_target_bio_based(t) ((t)->type->map != NULL)
+
+/*
* To check whether the target type is request-based or not (bio-based).
*/
#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+/*
+ * To check whether the target type is a hybrid (capable of being
+ * either request-based or bio-based).
+ */
+#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -146,10 +159,16 @@ void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
int dm_io_init(void);
void dm_io_exit(void);
@@ -162,4 +181,12 @@ void dm_kcopyd_exit(void);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
#endif
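
The dm_message_test_buffer_overflow() helper above supports the common device-mapper pattern of appending formatted fields into a fixed-size result buffer and reporting truncation to the caller. A self-contained sketch of that usage; the emit logic below is a simplification for illustration, not the kernel's actual macro:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

int main(void)
{
	char result[16] = "";
	size_t sz = 0;

	/* append two fields, as a message handler would */
	sz += snprintf(result + sz, sizeof(result) - sz, "%u ", 12345u);
	sz += snprintf(result + sz, sizeof(result) - sz, "%u ", 67890u);

	if (test_buffer_overflow(result, sizeof(result)))
		puts("result truncated; caller should retry with a bigger buffer");
	else
		printf("message result: %s\n", result);
	return 0;
}
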
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9f13e13506e..adf4d7e1d5e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1180,7 +1180,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
- mddev->bitmap_info.space;
+ mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
@@ -3429,7 +3429,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
- if (mddev->safemode_delay < old_delay)
+ if (mddev->safemode_delay < old_delay || old_delay == 0)
md_safemode_timeout((unsigned long)mddev);
}
return len;
@@ -5144,7 +5144,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5289,7 +5289,7 @@ static void __md_stop_writes(struct mddev *mddev)
md_super_wait(mddev);
if (mddev->ro == 0 &&
- (!mddev->in_sync || mddev->flags)) {
+ (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
@@ -5337,8 +5337,14 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
err = -EBUSY;
goto out;
}
- if (bdev)
- sync_blockdev(bdev);
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it
+ * so the page cache could be dirty and it is too late
+ * to flush. So abort.
+ */
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5373,14 +5379,14 @@ static int do_md_stop(struct mddev * mddev, int mode,
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
- if (bdev)
- /* It is possible IO was issued on some other
- * open file which was closed before we took ->open_mutex.
- * As that was not the last close __blkdev_put will not
- * have called sync_blockdev, so we must.
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it
+ * so the page cache could be dirty and it is too late
+ * to flush. So abort.
*/
- sync_blockdev(bdev);
-
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
@@ -5628,10 +5634,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
char *ptr, *buf = NULL;
int err = -ENOMEM;
- if (md_allow_write(mddev))
- file = kmalloc(sizeof(*file), GFP_NOIO);
- else
- file = kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kmalloc(sizeof(*file), GFP_NOIO);
if (!file)
goto out;
@@ -6420,6 +6423,20 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->flags),
msecs_to_jiffies(5000));
+ if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+ /* Need to flush page cache, and ensure no-one else opens
+ * and writes
+ */
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > 1) {
+ mutex_unlock(&mddev->open_mutex);
+ err = -EBUSY;
+ goto abort;
+ }
+ set_bit(MD_STILL_CLOSED, &mddev->flags);
+ mutex_unlock(&mddev->open_mutex);
+ sync_blockdev(bdev);
+ }
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6673,6 +6690,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
err = 0;
atomic_inc(&mddev->openers);
+ clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
@@ -7817,7 +7835,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 20f02c0b5f2..608050c43f1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -204,12 +204,16 @@ struct mddev {
struct md_personality *pers;
dev_t unit;
int md_minor;
- struct list_head disks;
+ struct list_head disks;
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
+#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since
+ * md_ioctl checked on it.
+ */
int suspended;
atomic_t active_io;
@@ -218,7 +222,7 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
- int ready; /* See when safe to pass
+ int ready; /* See when safe to pass
* IO requests down */
struct gendisk *gendisk;
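
A note on the MD_UPDATE_SB_FLAGS definition above: the MD_CHANGE_* macros are bit numbers used with set_bit()/test_bit(), so the literal mask (1 | 2 | 4) covers exactly bits 0, 1 and 2. That is why the `mddev->flags & MD_UPDATE_SB_FLAGS` tests introduced in md.c above replace the old bare `if (mddev->flags)`: the new MD_STILL_CLOSED bit (bit 4, value 16 when set) must not trigger a spurious superblock update. A self-contained check of the arithmetic:

#include <assert.h>

#define MD_CHANGE_DEVS		0
#define MD_CHANGE_CLEAN		1
#define MD_CHANGE_PENDING	2
#define MD_UPDATE_SB_FLAGS	(1 | 2 | 4)
#define MD_STILL_CLOSED		4	/* bit number, i.e. value 1 << 4 */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << MD_STILL_CLOSED;	/* like set_bit(4, &flags) */
	assert(!(flags & MD_UPDATE_SB_FLAGS));	/* no superblock update */

	flags |= 1UL << MD_CHANGE_CLEAN;	/* like set_bit(1, &flags) */
	assert(flags & MD_UPDATE_SB_FLAGS);	/* update now needed */
	return 0;
}
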
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 81b513890e2..a7e8bf29638 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
}
EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+ dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
bm->read_only = true;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index be5bff61be2..9a82083a66b 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
+/*
+ * Request data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
/*
* Switches the bm to a read only mode. Once read-only mode
* has been entered the following functions will return -EPERM.
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 35865425e4b..468e371ee9b 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -161,6 +161,7 @@ struct frame {
};
struct del_stack {
+ struct dm_btree_info *info;
struct dm_transaction_manager *tm;
int top;
struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
return s->top >= 0;
}
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+ unsigned i;
+ struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+ for (i = 0; i < f->nr_children; i++)
+ dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
int r;
@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
dm_tm_dec(s->tm, b);
else {
+ uint32_t flags;
struct frame *f = s->spine + ++s->top;
r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
f->level = level;
f->nr_children = le32_to_cpu(f->n->header.nr_entries);
f->current_child = 0;
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+ prefetch_children(s, f);
}
return 0;
@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
- return f->level < (info->levels - 1);
-}
-
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
+ s->info = info;
s->tm = info->tm;
s->top = -1;
@@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
info->value_type.dec(info->value_type.context,
value_ptr(f->n, i));
}
- f->current_child = f->nr_children;
+ pop_frame(s);
}
}
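
The push_frame() change above issues prefetches for every child of an internal node as soon as that node's frame is pushed, so by the time the traversal descends to each child the block should already be in the bufio cache. A user-space model of the stack-walk-with-prefetch pattern; the node type and prefetch() are stand-ins for illustration, not the dm-block-manager interfaces:

#include <stdio.h>

struct node {
	int nr_children;
	struct node **child;		/* NULL for a leaf */
};

/* stand-in for dm_bm_prefetch(): start an async read, don't wait */
static void prefetch(const struct node *n)
{
	printf("prefetch %p\n", (const void *)n);
}

static void delete_tree(struct node *root)
{
	struct node *stack[64];		/* fixed depth, for brevity */
	int top = 0;

	stack[top++] = root;
	while (top > 0) {
		struct node *n = stack[--top];

		if (n->child) {		/* internal: prefetch all children first */
			for (int i = 0; i < n->nr_children; i++) {
				prefetch(n->child[i]);
				stack[top++] = n->child[i];
			}
		}
		/* the node itself would be freed/refcount-decremented here */
	}
}

int main(void)
{
	struct node leaf1 = { 0, NULL }, leaf2 = { 0, NULL };
	struct node *kids[] = { &leaf1, &leaf2 };
	struct node root = { 2, kids };

	delete_tree(&root);
	return 0;
}
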
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d99eb..6058569fe86 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return dm_tm_unlock(ll->tm, blk);
}
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+ uint32_t *result)
{
__le32 le_rc;
- int r = sm_ll_lookup_bitmap(ll, b, result);
-
- if (r)
- return r;
-
- if (*result != 3)
- return r;
+ int r;
r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
if (r < 0)
@@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return r;
}
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+ int r = sm_ll_lookup_bitmap(ll, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result)
{
@@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
- uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+ uint32_t (*mutator)(void *context, uint32_t old),
+ void *context, enum allocation_event *ev)
{
int r;
- uint32_t bit, old;
+ uint32_t bit, old, ref_count;
struct dm_block *nb;
dm_block_t index = b;
struct disk_index_entry ie_disk;
@@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
bm_le = dm_bitmap_data(nb);
old = sm_lookup_bitmap(bm_le, bit);
+ if (old > 2) {
+ r = sm_ll_lookup_big_ref_count(ll, b, &old);
+ if (r < 0)
+ return r;
+ }
+
+ ref_count = mutator(context, old);
+
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t set_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
-
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+ return *((uint32_t *) context);
+}
- return sm_ll_insert(ll, b, rc + 1, ev);
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+ uint32_t ref_count, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t inc_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
+ return old + 1;
+}
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
- if (!rc)
- return -EINVAL;
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+ return old - 1;
+}
- return sm_ll_insert(ll, b, rc - 1, ev);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}
int sm_ll_commit(struct ll_disk *ll)
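
The refactor above folds sm_ll_insert(), sm_ll_inc() and sm_ll_dec() into a single sm_ll_mutate() that takes a mutator callback, so the bitmap-versus-btree refcount plumbing is written once and the old lookup-then-insert round trips disappear. A self-contained model of the callback pattern, with a plain array standing in for the on-disk structures:

#include <assert.h>
#include <stdint.h>

static uint32_t mutate(uint32_t *counts, int b,
		       uint32_t (*mutator)(void *context, uint32_t old),
		       void *context)
{
	counts[b] = mutator(context, counts[b]);
	return counts[b];
}

static uint32_t set_rc(void *context, uint32_t old)
{
	(void)old;
	return *(uint32_t *)context;
}

static uint32_t inc_rc(void *context, uint32_t old)
{
	(void)context;
	return old + 1;
}

static uint32_t dec_rc(void *context, uint32_t old)
{
	(void)context;
	return old - 1;
}

int main(void)
{
	uint32_t counts[4] = { 0 }, three = 3;

	mutate(counts, 0, set_rc, &three);	/* insert: set to 3 */
	mutate(counts, 0, inc_rc, NULL);	/* inc: 4 */
	mutate(counts, 0, dec_rc, NULL);	/* dec: back to 3 */
	assert(counts[0] == 3);
	return 0;
}
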
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78ea44336e7..7ff4f252ca1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/nodemask.h>
#include <trace/events/block.h>
#include "md.h"
@@ -60,6 +61,10 @@
#include "raid0.h"
#include "bitmap.h"
+#define cpu_to_group(cpu) cpu_to_node(cpu)
+#define ANY_GROUP NUMA_NO_NODE
+
+static struct workqueue_struct *raid5_wq;
/*
* Stripe cache
*/
@@ -72,6 +77,7 @@
#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -200,6 +206,49 @@ static int stripe_operations_active(struct stripe_head *sh)
test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
+static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5worker_group *group;
+ int thread_cnt;
+ int i, cpu = sh->cpu;
+
+ if (!cpu_online(cpu)) {
+ cpu = cpumask_any(cpu_online_mask);
+ sh->cpu = cpu;
+ }
+
+ if (list_empty(&sh->lru)) {
+ struct r5worker_group *group;
+ group = conf->worker_groups + cpu_to_group(cpu);
+ list_add_tail(&sh->lru, &group->handle_list);
+ group->stripes_cnt++;
+ sh->group = group;
+ }
+
+ if (conf->worker_cnt_per_group == 0) {
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+ }
+
+ group = conf->worker_groups + cpu_to_group(sh->cpu);
+
+ group->workers[0].working = true;
+ /* at least one worker should run to avoid race */
+ queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
+
+ thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
+ /* wakeup more workers */
+ for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
+ if (group->workers[i].working == false) {
+ group->workers[i].working = true;
+ queue_work_on(sh->cpu, raid5_wq,
+ &group->workers[i].work);
+ thread_cnt--;
+ }
+ }
+}
+
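
A worked example of the wakeup arithmetic in raid5_wakeup_stripe_thread() above, with MAX_STRIPE_BATCH equal to 8: workers[0] is always queued, then roughly one extra worker is woken per batch of queued stripes. With group->stripes_cnt of 33, thread_cnt is 33/8 - 1 = 3 extra workers; with 8 queued stripes, 8/8 - 1 = 0 extras; and with fewer than 8, thread_cnt goes negative, so the wakeup loop never runs and only workers[0] is queued.
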
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
BUG_ON(!list_empty(&sh->lru));
@@ -214,7 +263,12 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
else {
clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
- list_add_tail(&sh->lru, &conf->handle_list);
+ if (conf->worker_cnt_per_group == 0) {
+ list_add_tail(&sh->lru, &conf->handle_list);
+ } else {
+ raid5_wakeup_stripe_thread(sh);
+ return;
+ }
}
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -239,12 +293,62 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
do_release_stripe(conf, sh);
}
+static struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+ struct llist_node *new_head = NULL;
+
+ while (head) {
+ struct llist_node *tmp = head;
+ head = head->next;
+ tmp->next = new_head;
+ new_head = tmp;
+ }
+
+ return new_head;
+}
+
+/* should hold conf->device_lock already */
+static int release_stripe_list(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ int count = 0;
+ struct llist_node *head;
+
+ head = llist_del_all(&conf->released_stripes);
+ head = llist_reverse_order(head);
+ while (head) {
+ sh = llist_entry(head, struct stripe_head, release_list);
+ head = llist_next(head);
+ /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+ smp_mb();
+ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+ /*
+ * It doesn't matter if the bit is set again here, because in
+ * that case the count is always > 1. The same is true for the
+ * STRIPE_ON_UNPLUG_LIST bit.
+ */
+ __release_stripe(conf, sh);
+ count++;
+ }
+
+ return count;
+}
+
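
llist_add() in release_stripe() below pushes each stripe onto the front of conf->released_stripes, so llist_del_all() hands release_stripe_list() the entries in LIFO order; the local llist_reverse_order() above restores submission (FIFO) order before the stripes are handled. A stand-alone model of the same pointer reversal:

#include <assert.h>
#include <stddef.h>

struct lnode { struct lnode *next; int v; };

static struct lnode *reverse(struct lnode *head)
{
	struct lnode *new_head = NULL;

	while (head) {
		struct lnode *tmp = head;
		head = head->next;
		tmp->next = new_head;	/* relink in front of the new list */
		new_head = tmp;
	}
	return new_head;
}

int main(void)
{
	/* built by prepending, so list order is 1 -> 2 -> 3 (LIFO of 3,2,1) */
	struct lnode c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct lnode *h = reverse(&a);

	assert(h->v == 3 && h->next->v == 2 && h->next->next->v == 1);
	return 0;
}
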
static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ bool wakeup;
+ if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+slow_path:
local_irq_save(flags);
+ /* we are OK here whether STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
do_release_stripe(conf, sh);
spin_unlock(&conf->device_lock);
@@ -359,6 +463,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
raid5_build_block(sh, i, previous);
}
insert_hash(conf, sh);
+ sh->cpu = smp_processor_id();
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -491,7 +596,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+ && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
@@ -499,6 +605,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
!test_bit(STRIPE_EXPANDING, &sh->state))
BUG();
list_del_init(&sh->lru);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
}
}
} while (sh == NULL);
@@ -3779,6 +3889,7 @@ static void raid5_activate_delayed(struct r5conf *conf)
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->hold_list);
+ raid5_wakeup_stripe_thread(sh);
}
}
}
@@ -4058,18 +4169,35 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
* head of the hold_list has changed, i.e. the head was promoted to the
* handle_list.
*/
-static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
+static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
- struct stripe_head *sh;
+ struct stripe_head *sh = NULL, *tmp;
+ struct list_head *handle_list = NULL;
+ struct r5worker_group *wg = NULL;
+
+ if (conf->worker_cnt_per_group == 0) {
+ handle_list = &conf->handle_list;
+ } else if (group != ANY_GROUP) {
+ handle_list = &conf->worker_groups[group].handle_list;
+ wg = &conf->worker_groups[group];
+ } else {
+ int i;
+ for (i = 0; i < conf->group_cnt; i++) {
+ handle_list = &conf->worker_groups[i].handle_list;
+ wg = &conf->worker_groups[i];
+ if (!list_empty(handle_list))
+ break;
+ }
+ }
pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
__func__,
- list_empty(&conf->handle_list) ? "empty" : "busy",
+ list_empty(handle_list) ? "empty" : "busy",
list_empty(&conf->hold_list) ? "empty" : "busy",
atomic_read(&conf->pending_full_writes), conf->bypass_count);
- if (!list_empty(&conf->handle_list)) {
- sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+ if (!list_empty(handle_list)) {
+ sh = list_entry(handle_list->next, typeof(*sh), lru);
if (list_empty(&conf->hold_list))
conf->bypass_count = 0;
@@ -4087,14 +4215,32 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
((conf->bypass_threshold &&
conf->bypass_count > conf->bypass_threshold) ||
atomic_read(&conf->pending_full_writes) == 0)) {
- sh = list_entry(conf->hold_list.next,
- typeof(*sh), lru);
- conf->bypass_count -= conf->bypass_threshold;
- if (conf->bypass_count < 0)
- conf->bypass_count = 0;
- } else
+
+ list_for_each_entry(tmp, &conf->hold_list, lru) {
+ if (conf->worker_cnt_per_group == 0 ||
+ group == ANY_GROUP ||
+ !cpu_online(tmp->cpu) ||
+ cpu_to_group(tmp->cpu) == group) {
+ sh = tmp;
+ break;
+ }
+ }
+
+ if (sh) {
+ conf->bypass_count -= conf->bypass_threshold;
+ if (conf->bypass_count < 0)
+ conf->bypass_count = 0;
+ }
+ wg = NULL;
+ }
+
+ if (!sh)
return NULL;
+ if (wg) {
+ wg->stripes_cnt--;
+ sh->group = NULL;
+ }
list_del_init(&sh->lru);
atomic_inc(&sh->count);
BUG_ON(atomic_read(&sh->count) != 1);
@@ -4127,6 +4273,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
*/
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+ /*
+ * STRIPE_ON_RELEASE_LIST could be set here. In that
+ * case, the count is always > 1.
+ */
__release_stripe(conf, sh);
cnt++;
}
@@ -4286,8 +4436,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
+ int seq;
retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
previous = 0;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
@@ -4320,7 +4472,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
previous,
&dd_idx, NULL);
pr_debug("raid456: make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
+ (unsigned long long)new_sector,
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, previous,
@@ -4349,6 +4501,13 @@ static void make_request(struct mddev *mddev, struct bio * bi)
goto retry;
}
}
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head
+ * by accident
+ */
+ release_stripe(sh);
+ goto retry;
+ }
if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
@@ -4788,14 +4947,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
-#define MAX_STRIPE_BATCH 8
-static int handle_active_stripes(struct r5conf *conf)
+static int handle_active_stripes(struct r5conf *conf, int group,
+ struct r5worker *worker)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0;
while (batch_size < MAX_STRIPE_BATCH &&
- (sh = __get_priority_stripe(conf)) != NULL)
+ (sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
if (batch_size == 0)
@@ -4813,6 +4972,39 @@ static int handle_active_stripes(struct r5conf *conf)
return batch_size;
}
+static void raid5_do_work(struct work_struct *work)
+{
+ struct r5worker *worker = container_of(work, struct r5worker, work);
+ struct r5worker_group *group = worker->group;
+ struct r5conf *conf = group->conf;
+ int group_id = group - conf->worker_groups;
+ int handled;
+ struct blk_plug plug;
+
+ pr_debug("+++ raid5worker active\n");
+
+ blk_start_plug(&plug);
+ handled = 0;
+ spin_lock_irq(&conf->device_lock);
+ while (1) {
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
+
+ batch_size = handle_active_stripes(conf, group_id, worker);
+ worker->working = false;
+ if (!batch_size && !released)
+ break;
+ handled += batch_size;
+ }
+ pr_debug("%d stripes handled\n", handled);
+
+ spin_unlock_irq(&conf->device_lock);
+ blk_finish_plug(&plug);
+
+ pr_debug("--- raid5worker inactive\n");
+}
+
/*
* This is our raid5 kernel thread.
*
@@ -4836,7 +5028,9 @@ static void raid5d(struct md_thread *thread)
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- int batch_size;
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
if (
!list_empty(&conf->bitmap_list)) {
@@ -4860,8 +5054,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf);
- if (!batch_size)
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ if (!batch_size && !released)
break;
handled += batch_size;
@@ -4989,10 +5183,70 @@ stripe_cache_active_show(struct mddev *mddev, char *page)
static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
+static ssize_t
+raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->worker_cnt_per_group);
+ else
+ return 0;
+}
+
+static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static ssize_t
+raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+ int err;
+ struct r5worker_group *old_groups;
+ int old_group_cnt;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new == conf->worker_cnt_per_group)
+ return len;
+
+ mddev_suspend(mddev);
+
+ old_groups = conf->worker_groups;
+ old_group_cnt = conf->worker_cnt_per_group;
+
+ conf->worker_groups = NULL;
+ err = alloc_thread_groups(conf, new);
+ if (err) {
+ conf->worker_groups = old_groups;
+ conf->worker_cnt_per_group = old_group_cnt;
+ } else {
+ if (old_groups)
+ kfree(old_groups[0].workers);
+ kfree(old_groups);
+ }
+
+ mddev_resume(mddev);
+
+ if (err)
+ return err;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
+ raid5_show_group_thread_cnt,
+ raid5_store_group_thread_cnt);
+
static struct attribute *raid5_attrs[] = {
&raid5_stripecache_size.attr,
&raid5_stripecache_active.attr,
&raid5_preread_bypass_threshold.attr,
+ &raid5_group_thread_cnt.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5000,6 +5254,54 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
+static int alloc_thread_groups(struct r5conf *conf, int cnt)
+{
+ int i, j;
+ ssize_t size;
+ struct r5worker *workers;
+
+ conf->worker_cnt_per_group = cnt;
+ if (cnt == 0) {
+ conf->worker_groups = NULL;
+ return 0;
+ }
+ conf->group_cnt = num_possible_nodes();
+ size = sizeof(struct r5worker) * cnt;
+ workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
+ conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ conf->group_cnt, GFP_NOIO);
+ if (!conf->worker_groups || !workers) {
+ kfree(workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < conf->group_cnt; i++) {
+ struct r5worker_group *group;
+
+ group = &conf->worker_groups[i];
+ INIT_LIST_HEAD(&group->handle_list);
+ group->conf = conf;
+ group->workers = workers + i * cnt;
+
+ for (j = 0; j < cnt; j++) {
+ group->workers[j].group = group;
+ INIT_WORK(&group->workers[j].work, raid5_do_work);
+ }
+ }
+
+ return 0;
+}
+
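
alloc_thread_groups() above makes one flat workers[] allocation and carves it into per-group slices with `workers + i * cnt`, one group per NUMA node; that is also why free_thread_groups() below frees only worker_groups[0].workers. A self-contained model of that slicing, assuming two nodes and three workers per group:

#include <assert.h>
#include <stdlib.h>

struct worker { int group_id; };

int main(void)
{
	int group_cnt = 2, cnt = 3, i, j;
	struct worker *workers = calloc((size_t)group_cnt * cnt, sizeof(*workers));
	struct worker *groups[2];

	for (i = 0; i < group_cnt; i++) {
		groups[i] = workers + i * cnt;	/* slice, no extra allocation */
		for (j = 0; j < cnt; j++)
			groups[i][j].group_id = i;
	}
	assert(groups[1][0].group_id == 1);
	free(workers);				/* one free releases all slices */
	return 0;
}
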
+static void free_thread_groups(struct r5conf *conf)
+{
+ if (conf->worker_groups)
+ kfree(conf->worker_groups[0].workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+}
+
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
@@ -5040,6 +5342,7 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
kfree(conf->disks);
@@ -5168,7 +5471,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
if (conf == NULL)
goto abort;
+ /* Don't enable multi-threading by default */
+ if (alloc_thread_groups(conf, 0))
+ goto abort;
spin_lock_init(&conf->device_lock);
+ seqcount_init(&conf->gen_lock);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
@@ -5176,6 +5483,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
INIT_LIST_HEAD(&conf->inactive_list);
+ init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
@@ -5980,6 +6288,7 @@ static int raid5_start_reshape(struct mddev *mddev)
atomic_set(&conf->reshape_stripes, 0);
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks;
conf->prev_chunk_sectors = conf->chunk_sectors;
@@ -5996,8 +6305,16 @@ static int raid5_start_reshape(struct mddev *mddev)
else
conf->reshape_progress = 0;
conf->reshape_safe = conf->reshape_progress;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
+ /* Now make sure any requests that proceeded on the assumption
+ * the reshape wasn't running - like Discard or Read - have
+ * completed.
+ */
+ mddev_suspend(mddev);
+ mddev_resume(mddev);
+
/* Add some new drives, as many as will fit.
* We know there are enough to make the newly sized array work.
* Don't add devices if we are reducing the number of
@@ -6472,6 +6789,10 @@ static struct md_personality raid4_personality =
static int __init raid5_init(void)
{
+ raid5_wq = alloc_workqueue("raid5wq",
+ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+ if (!raid5_wq)
+ return -ENOMEM;
register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality);
@@ -6483,6 +6804,7 @@ static void raid5_exit(void)
unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality);
+ destroy_workqueue(raid5_wq);
}
module_init(raid5_init);
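
With the raid5_group_thread_cnt attribute registered above, the per-group worker count becomes tunable at runtime. Given the usual md sysfs layout it should appear as group_thread_cnt under the array's md directory (for example /sys/block/md0/md/group_thread_cnt; the path is inferred from the attribute group rather than stated in this patch), with 0 restoring the single-threaded behaviour, and every write going through mddev_suspend()/mddev_resume() as raid5_store_group_thread_cnt() shows.
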
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 70c49329ca9..2113ffa82c7 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -197,6 +197,7 @@ enum reconstruct_states {
struct stripe_head {
struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
+ struct llist_node release_list;
struct r5conf *raid_conf;
short generation; /* increments with every
* reshape */
@@ -211,6 +212,8 @@ struct stripe_head {
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
+ int cpu;
+ struct r5worker_group *group;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -321,6 +324,7 @@ enum {
STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
+ STRIPE_ON_RELEASE_LIST,
};
/*
@@ -363,6 +367,19 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+struct r5worker {
+ struct work_struct work;
+ struct r5worker_group *group;
+ bool working;
+};
+
+struct r5worker_group {
+ struct list_head handle_list;
+ struct r5conf *conf;
+ struct r5worker *workers;
+ int stripes_cnt;
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
@@ -386,6 +403,7 @@ struct r5conf {
int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */
+ seqcount_t gen_lock; /* lock against generation changes */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
long long min_offset_diff; /* minimum difference between
@@ -445,6 +463,7 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list;
+ struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
int inactive_blocked; /* release of inactive stripes blocked,
@@ -458,6 +477,9 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct r5worker_group *worker_groups;
+ int group_cnt;
+ int worker_cnt_per_group;
};
/*
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 95f1814b536..1d389491d5f 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -24,3 +24,15 @@ config MSPRO_BLOCK
support. This provides a block device driver, which you can use
to mount the filesystem. Almost everyone wishing MemoryStick
support should say Y or M here.
+
+config MS_BLOCK
+ tristate "MemoryStick Standard device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the MemoryStick Standard device driver
+ support. This provides a block device driver, which you can use
+ to mount the filesystem.
+ This driver works with old (bulky) MemoryStick and MemoryStick Duo
+ cards, but not with MemoryStick PRO. Say Y if you have such a card.
+ The driver is new and not yet well tested, so it may damage your card
+ (even permanently).
diff --git a/drivers/memstick/core/Makefile b/drivers/memstick/core/Makefile
index ecd02993773..0d7f90c0ff2 100644
--- a/drivers/memstick/core/Makefile
+++ b/drivers/memstick/core/Makefile
@@ -3,5 +3,5 @@
#
obj-$(CONFIG_MEMSTICK) += memstick.o
-
+obj-$(CONFIG_MS_BLOCK) += ms_block.o
obj-$(CONFIG_MSPRO_BLOCK) += mspro_block.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644
index 00000000000..08e70232062
--- /dev/null
+++ b/drivers/memstick/core/ms_block.c
@@ -0,0 +1,2385 @@
+/*
+ * ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ */
+#define DRIVER_NAME "ms_block"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from' starting at offset 'offset' and with length
+ * 'len' to another scatterlist of 'to_nents' entries
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from,
+ struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
+{
+ size_t copied = 0;
+
+ while (offset > 0) {
+ if (offset >= sg_from->length) {
+ if (sg_is_last(sg_from))
+ return 0;
+
+ offset -= sg_from->length;
+ sg_from = sg_next(sg_from);
+ continue;
+ }
+
+ copied = min(len, sg_from->length - offset);
+ sg_set_page(sg_to, sg_page(sg_from),
+ copied, sg_from->offset + offset);
+
+ len -= copied;
+ offset = 0;
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_to = sg_next(sg_to);
+ to_nents--;
+ sg_from = sg_next(sg_from);
+ }
+
+ while (len > sg_from->length && to_nents--) {
+ len -= sg_from->length;
+ copied += sg_from->length;
+
+ sg_set_page(sg_to, sg_page(sg_from),
+ sg_from->length, sg_from->offset);
+
+ if (sg_is_last(sg_from) || !len)
+ goto out;
+
+ sg_from = sg_next(sg_from);
+ sg_to = sg_next(sg_to);
+ }
+
+ if (len && to_nents) {
+ sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+ copied += len;
+ }
+out:
+ sg_mark_end(sg_to);
+ return copied;
+}
+
+/*
+ * Compares a section of 'sg' starting at offset 'offset' and with length 'len'
+ * to a linear buffer of length 'len' at address 'buffer'
+ * Returns 0 if equal and -1 otherwise
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+ size_t offset, u8 *buffer, size_t len)
+{
+ int retval = 0, cmplen;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+ if (offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ cmplen = min(miter.length - offset, len);
+ retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+ if (retval)
+ break;
+
+ buffer += cmplen;
+ len -= cmplen;
+ offset = 0;
+ }
+
+ if (!retval && len)
+ retval = -1;
+
+ sg_miter_stop(&miter);
+ return retval;
+}
+
+
+/* Get the zone in which the block with logical address 'lba' lives
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks; the first zone uses 494 of
+ * them and every following zone uses 496.
+ * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
+*/
+static int msb_get_zone_from_lba(int lba)
+{
+ if (lba < 494)
+ return 0;
+ return ((lba - 494) / 496) + 1;
+}
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+ return pba / MS_BLOCKS_IN_ZONE;
+}
+
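
A quick self-contained check of the zone arithmetic above, confirming the boundaries given in the comment (zone 0 holds lba 0-493, each later zone holds 496 blocks):

#include <assert.h>

static int zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return (lba - 494) / 496 + 1;
}

int main(void)
{
	assert(zone_from_lba(0) == 0);
	assert(zone_from_lba(493) == 0);
	assert(zone_from_lba(494) == 1);
	assert(zone_from_lba(989) == 1);	/* 494 + 496 - 1 */
	assert(zone_from_lba(990) == 2);
	return 0;
}
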
+/* Debug test to validate free block counts */
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+ int total_free_blocks = 0;
+ int i;
+
+ if (!debug)
+ return 0;
+
+ for (i = 0; i < msb->zone_count; i++)
+ total_free_blocks += msb->free_block_count[i];
+
+ if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+ msb->block_count) == total_free_blocks)
+ return 0;
+
+ pr_err("BUG: free block counts don't match the bitmap");
+ msb->read_only = true;
+ return -EINVAL;
+}
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err(
+ "BUG: attempt to mark already used pba %d as used", pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __set_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+ int zone = msb_get_zone_from_pba(pba);
+
+ if (!test_bit(pba, msb->used_blocks_bitmap)) {
+ pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
+ msb->read_only = true;
+ return;
+ }
+
+ if (msb_validate_used_block_bitmap(msb))
+ return;
+
+ /* No races because all IO is single threaded */
+ __clear_bit(pba, msb->used_blocks_bitmap);
+ msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+ msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.w_length = sizeof(struct ms_id_register);
+ msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+ msb->reg_addr.r_length = sizeof(struct ms_id_register);
+ msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+ (struct memstick_dev *card, struct memstick_request **req))
+{
+ struct memstick_dev *card = msb->card;
+
+ WARN_ON(msb->state != -1);
+ msb->int_polling = false;
+ msb->state = 0;
+ msb->exit_error = 0;
+
+ memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+ card->next_request = state_func;
+ memstick_new_req(card->host);
+ wait_for_completion(&card->mrq_complete);
+
+ WARN_ON(msb->state != -1);
+ return msb->exit_error;
+}
+
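
msb_run_state_machine() above captures a recurring pattern in this driver: reset msb->state, install a step handler as card->next_request, kick the host, and wait; each completed request re-enters the handler, which advances msb->state until it calls msb_exit_state_machine(). A stripped-down user-space model of that loop; all names are illustrative:

#include <stdio.h>

struct sm { int state; int exit_error; };

/* returns 1 while more requests are needed, 0 when finished */
static int step(struct sm *m)
{
	switch (m->state) {
	case 0:			/* e.g. write the param register */
		m->state = 1;
		return 1;
	case 1:			/* e.g. send the command, then done */
		m->exit_error = 0;
		return 0;
	}
	return 0;
}

int main(void)
{
	struct sm m = { .state = 0, .exit_error = -1 };

	while (step(&m))
		;		/* each iteration models one completed mrq */
	printf("exit_error=%d\n", m.exit_error);
	return 0;
}
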
+/* State machine handlers call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+ WARN_ON(msb->state == -1);
+
+ msb->state = -1;
+ msb->exit_error = error;
+ msb->card->next_request = h_msb_default_bad;
+
+ /* Invalidate reg window on errors */
+ if (error)
+ msb_invalidate_reg_window(msb);
+
+ complete(&msb->card->mrq_complete);
+ return -ENXIO;
+}
+
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+ struct memstick_request *mrq = &msb->card->current_mrq;
+
+ WARN_ON(msb->state == -1);
+
+ if (!msb->int_polling) {
+ msb->int_timeout = jiffies +
+ msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+ msb->int_polling = true;
+ } else if (time_after(jiffies, msb->int_timeout)) {
+ mrq->data[0] = MEMSTICK_INT_CMDNAK;
+ return 0;
+ }
+
+ if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+ mrq->need_card_int && !mrq->error) {
+ mrq->data[0] = mrq->int_reg;
+ mrq->need_card_int = false;
+ return 0;
+ } else {
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ return 1;
+ }
+}
+
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.r_offset != offset ||
+ msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.r_offset = offset;
+ msb->reg_addr.r_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+ return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+ struct memstick_request *req = &msb->card->current_mrq;
+
+ if (msb->reg_addr.w_offset != offset ||
+ msb->reg_addr.w_length != len || !msb->addr_valid) {
+
+ msb->reg_addr.w_offset = offset;
+ msb->reg_addr.w_length = len;
+ msb->addr_valid = true;
+
+ memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+ &msb->reg_addr, sizeof(msb->reg_addr));
+ return 0;
+ }
+
+ memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+ return 1;
+}
+
+/* Default handler: fails any request issued when no IO is expected */
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq)
+{
+ return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * Writes output to msb->current_sg, takes the sector address from msb->regs.param
+ * Can also be used to read extra data only. Set params accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 command, intreg;
+
+ if (mrq->error) {
+ dbg("read_page, unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+ case MSB_RP_SEND_BLOCK_ADDRESS:
+ /* msb_write_regs sometimes "fails" because it needs to update
+ the reg window, and thus it returns a request for that.
+ We then stay in this state and retry */
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_RP_SEND_READ_COMMAND;
+ return 0;
+
+ case MSB_RP_SEND_READ_COMMAND:
+ command = MS_CMD_BLOCK_READ;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_RP_SEND_INT_REQ;
+ return 0;
+
+ case MSB_RP_SEND_INT_REQ:
+ msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+ /* If we don't actually need to send the INT read request (needed
+ only in serial mode), just fall through */
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_RP_RECEIVE_INT_REQ_RESULT:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_RP_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = (intreg & MEMSTICK_INT_ERR) ?
+ MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+ goto again;
+
+ case MSB_RP_SEND_READ_STATUS_REG:
+ /* read the status register to understand the source of the INT_ERR */
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, status),
+ sizeof(struct ms_status_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECIVE_STATUS_REG;
+ return 0;
+
+ case MSB_RP_RECIVE_STATUS_REG:
+ msb->regs.status = *(struct ms_status_register *)mrq->data;
+ msb->state = MSB_RP_SEND_OOB_READ;
+ /* fallthrough */
+
+ case MSB_RP_SEND_OOB_READ:
+ if (!msb_read_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register)))
+ return 0;
+
+ msb->state = MSB_RP_RECEIVE_OOB_READ;
+ return 0;
+
+ case MSB_RP_RECEIVE_OOB_READ:
+ msb->regs.extra_data =
+ *(struct ms_extra_data_register *) mrq->data;
+ msb->state = MSB_RP_SEND_READ_DATA;
+ /* fallthrough */
+
+ case MSB_RP_SEND_READ_DATA:
+ /* Skip this state if we only read the OOB */
+ if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ goto again;
+ }
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size);
+
+ memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+ msb->state = MSB_RP_RECEIVE_READ_DATA;
+ return 0;
+
+ case MSB_RP_RECEIVE_READ_DATA:
+ if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+ dbg("read_page: uncorrectable error");
+ return msb_exit_state_machine(msb, -EBADMSG);
+ }
+
+ if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+ dbg("read_page: correctable error");
+ msb->current_sg_offset += msb->page_size;
+ return msb_exit_state_machine(msb, -EUCLEAN);
+ } else {
+ dbg("read_page: INT error, but no status error bits");
+ return msb_exit_state_machine(msb, -EIO);
+ }
+ }
+
+ BUG();
+}
+
+/*
+ * Handler for writes of exactly one block.
+ * Takes the address from msb->regs.param.
+ * Writes the same extra data to every page, taken
+ * from msb->regs.extra_data
+ * Returns -EBADMSG if the write fails due to an uncorrectable error, or -EIO
+ * if the device refuses to take the command or some other error occurs
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct scatterlist sg[2];
+ u8 intreg, command;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+again:
+ switch (msb->state) {
+
+ /* HACK: JMicron handling of TPCs between 8 and
+ * sizeof(memstick_request.data) is broken due to a hardware
+ * bug in the PIO mode that is used for these TPCs.
+ * Therefore split the write
+ */
+
+ case MSB_WB_SEND_WRITE_PARAMS:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+
+ msb->state = MSB_WB_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_WB_SEND_WRITE_OOB:
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+ msb->state = MSB_WB_SEND_WRITE_COMMAND;
+ return 0;
+
+
+ case MSB_WB_SEND_WRITE_COMMAND:
+ command = MS_CMD_BLOCK_WRITE;
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ return 0;
+
+ case MSB_WB_SEND_INT_REQ:
+ msb->state = MSB_WB_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_WB_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+ msb->regs.status.interrupt = intreg;
+
+ /* errors mean out of here, and fast... */
+ if (intreg & (MEMSTICK_INT_CMDNAK))
+ return msb_exit_state_machine(msb, -EIO);
+
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+
+ /* for last page we need to poll CED */
+ if (msb->current_page == msb->pages_in_block) {
+ if (intreg & MEMSTICK_INT_CED)
+ return msb_exit_state_machine(msb, 0);
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+
+ }
+
+ /* for non-last page we need BREQ before writing next chunk */
+ if (!(intreg & MEMSTICK_INT_BREQ)) {
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ }
+
+ msb->int_polling = false;
+ msb->state = MSB_WB_SEND_WRITE_DATA;
+ /* fallthrough */
+
+ case MSB_WB_SEND_WRITE_DATA:
+ sg_init_table(sg, ARRAY_SIZE(sg));
+
+ if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+ msb->current_sg_offset,
+ msb->page_size) < msb->page_size)
+ return msb_exit_state_machine(msb, -EIO);
+
+ memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+ mrq->need_card_int = 1;
+ msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+ return 0;
+
+ case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+ msb->current_page++;
+ msb->current_sg_offset += msb->page_size;
+ msb->state = MSB_WB_SEND_INT_REQ;
+ goto again;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to the device that
+ * consist of a register write + command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ u8 intreg;
+
+ if (mrq->error) {
+ dbg("send_command: unknown error");
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+again:
+ switch (msb->state) {
+
+ /* HACK: see h_msb_write_block */
+ case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ sizeof(struct ms_param_register),
+ &msb->regs.param))
+ return 0;
+ msb->state = MSB_SC_SEND_WRITE_OOB;
+ return 0;
+
+ case MSB_SC_SEND_WRITE_OOB:
+ if (!msb->command_need_oob) {
+ msb->state = MSB_SC_SEND_COMMAND;
+ goto again;
+ }
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, extra_data),
+ sizeof(struct ms_extra_data_register),
+ &msb->regs.extra_data))
+ return 0;
+
+ msb->state = MSB_SC_SEND_COMMAND;
+ return 0;
+
+ case MSB_SC_SEND_COMMAND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+ msb->state = MSB_SC_SEND_INT_REQ;
+ return 0;
+
+ case MSB_SC_SEND_INT_REQ:
+ msb->state = MSB_SC_RECEIVE_INT_REQ;
+ if (msb_read_int_reg(msb, -1))
+ return 0;
+ /* fallthrough */
+
+ case MSB_SC_RECEIVE_INT_REQ:
+ intreg = mrq->data[0];
+
+ if (intreg & MEMSTICK_INT_CMDNAK)
+ return msb_exit_state_machine(msb, -EIO);
+ if (intreg & MEMSTICK_INT_ERR)
+ return msb_exit_state_machine(msb, -EBADMSG);
+
+ if (!(intreg & MEMSTICK_INT_CED)) {
+ msb->state = MSB_SC_SEND_INT_REQ;
+ goto again;
+ }
+
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ u8 command = MS_CMD_RESET;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+ if (mrq->error)
+ return msb_exit_state_machine(msb, mrq->error);
+
+ switch (msb->state) {
+ case MSB_RS_SEND:
+ memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+ mrq->need_card_int = 0;
+ msb->state = MSB_RS_CONFIRM;
+ return 0;
+ case MSB_RS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+ BUG();
+}
+
+/* This handler is used to do the serial->parallel switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+ struct memstick_request **out_mrq)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+ struct memstick_host *host = card->host;
+
+ if (mrq->error) {
+ dbg("parallel_switch: error");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ return msb_exit_state_machine(msb, mrq->error);
+ }
+
+ switch (msb->state) {
+ case MSB_PS_SEND_SWITCH_COMMAND:
+ /* Set the parallel interface on memstick side */
+ msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+ if (!msb_write_regs(msb,
+ offsetof(struct ms_register, param),
+ 1,
+ (unsigned char *)&msb->regs.param))
+ return 0;
+
+ msb->state = MSB_PS_SWICH_HOST;
+ return 0;
+
+ case MSB_PS_SWICH_HOST:
+ /* Set parallel interface on our side + send a dummy request
+ to see if card responds */
+ host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+ msb->state = MSB_PS_CONFIRM;
+ return 0;
+
+ case MSB_PS_CONFIRM:
+ return msb_exit_state_machine(msb, 0);
+ }
+
+ BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+
+ bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+ struct memstick_dev *card = msb->card;
+ struct memstick_host *host = card->host;
+ int error;
+
+ /* Reset the card */
+ msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+ if (full) {
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+ if (error)
+ goto out_error;
+
+ msb_invalidate_reg_window(msb);
+
+ error = host->set_param(host,
+ MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ if (error)
+ goto out_error;
+
+ error = host->set_param(host,
+ MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+ if (error) {
+out_error:
+ dbg("Failed to reset the host controller");
+ msb->read_only = true;
+ return -EFAULT;
+ }
+ }
+
+ error = msb_run_state_machine(msb, h_msb_reset);
+ if (error) {
+ dbg("Failed to reset the card");
+ msb->read_only = true;
+ return -ENODEV;
+ }
+
+ /* Set parallel mode */
+ if (was_parallel)
+ msb_switch_to_parallel(msb);
+ return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+ int error;
+
+ error = msb_run_state_machine(msb, h_msb_parallel_switch);
+ if (error) {
+ pr_err("Switch to parallel failed");
+ msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+ msb_reset(msb, true);
+ return -EFAULT;
+ }
+
+ msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+ return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+ u16 pba, u8 page, u8 flag)
+{
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+ msb->regs.extra_data.overwrite_flag = flag;
+ msb->command_value = MS_CMD_BLOCK_WRITE;
+ msb->command_need_oob = true;
+
+ dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+ flag, pba, page);
+ return msb_run_state_machine(msb, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+ pr_notice("marking pba %d as bad", pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(
+ msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+ dbg("marking page %d of pba %d as bad", page, pba);
+ msb_reset(msb, true);
+ return msb_set_overwrite_flag(msb,
+ pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+ int error, try;
+ if (msb->read_only)
+ return -EROFS;
+
+ dbg_verbose("erasing pba %d", pba);
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = 0;
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->command_value = MS_CMD_BLOCK_ERASE;
+ msb->command_need_oob = false;
+
+
+ error = msb_run_state_machine(msb, h_msb_send_command);
+ if (!error || msb_reset(msb, true))
+ break;
+ }
+
+ if (error) {
+ pr_err("erase failed, marking pba %d as bad", pba);
+ msb_mark_bad(msb, pba);
+ }
+
+ dbg_verbose("erase success, marking pba %d as unused", pba);
+ msb_mark_block_unused(msb, pba);
+ __set_bit(pba, msb->erased_blocks_bitmap);
+ return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+ u16 pba, u8 page, struct ms_extra_data_register *extra,
+ struct scatterlist *sg, int offset)
+{
+ int try, error;
+
+ if (pba == MS_BLOCK_INVALID) {
+ unsigned long flags;
+ struct sg_mapping_iter miter;
+ size_t len = msb->page_size;
+
+ dbg_verbose("read unmapped sector. returning 0xFF");
+
+ local_irq_save(flags);
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ while (sg_miter_next(&miter) && len > 0) {
+
+ int chunklen;
+
+ if (offset && offset >= miter.length) {
+ offset -= miter.length;
+ continue;
+ }
+
+ chunklen = min(miter.length - offset, len);
+ memset(miter.addr + offset, 0xFF, chunklen);
+ len -= chunklen;
+ offset = 0;
+ }
+
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+
+ if (offset)
+ return -EFAULT;
+
+ if (extra)
+ memset(extra, 0xFF, sizeof(*extra));
+ return 0;
+ }
+
+ if (pba >= msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ for (try = 1; try < 3; try++) {
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ error = msb_run_state_machine(msb, h_msb_read_page);
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ error = 0;
+ }
+
+ if (!error && extra)
+ *extra = msb->regs.extra_data;
+
+ if (!error || msb_reset(msb, true))
+ break;
+
+ }
+
+ /* Mark bad pages */
+ if (error == -EBADMSG) {
+ pr_err("uncorrectable error on read of pba %d, page %d",
+ pba, page);
+
+ if (msb->regs.extra_data.overwrite_flag &
+ MEMSTICK_OVERWRITE_PGST0)
+ msb_mark_page_bad(msb, pba, page);
+ return -EBADMSG;
+ }
+
+ if (error)
+ pr_err("read of pba %d, page %d failed with error %d",
+ pba, page, error);
+ return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+ struct ms_extra_data_register *extra)
+{
+ int error;
+
+ BUG_ON(!extra);
+ msb->regs.param.block_address = cpu_to_be16(pba);
+ msb->regs.param.page_address = page;
+ msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+ if (pba >= msb->block_count) {
+ pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
+ return -EINVAL;
+ }
+
+ error = msb_run_state_machine(msb, h_msb_read_page);
+ *extra = msb->regs.extra_data;
+
+ if (error == -EUCLEAN) {
+ pr_notice("correctable error on pba %d, page %d",
+ pba, page);
+ return 0;
+ }
+
+ return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+ struct scatterlist *orig_sg, int offset)
+{
+ struct scatterlist sg;
+ int page = 0, error;
+
+ sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+ while (page < msb->pages_in_block) {
+
+ error = msb_read_page(msb, pba, page,
+ NULL, &sg, page * msb->page_size);
+ if (error)
+ return error;
+ page++;
+ }
+
+ if (msb_sg_compare_to_buffer(orig_sg, offset,
+ msb->block_buffer, msb->block_size))
+ return -EIO;
+ return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+ u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+ int error, current_try = 1;
+ BUG_ON(sg->length < msb->page_size);
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (pba == MS_BLOCK_INVALID) {
+ pr_err(
+ "BUG: write: attempt to write MS_BLOCK_INVALID block");
+ return -EINVAL;
+ }
+
+ if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+ pr_err(
+ "BUG: write: attempt to write beyond the end of device");
+ return -EINVAL;
+ }
+
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_err("BUG: write: lba zone mismatch");
+ return -EINVAL;
+ }
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ pr_err("BUG: write: attempt to write to boot blocks!");
+ return -EINVAL;
+ }
+
+ while (1) {
+
+ if (msb->read_only)
+ return -EROFS;
+
+ msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+ msb->regs.param.page_address = 0;
+ msb->regs.param.block_address = cpu_to_be16(pba);
+
+ msb->regs.extra_data.management_flag = 0xFF;
+ msb->regs.extra_data.overwrite_flag = 0xF8;
+ msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+ msb->current_sg = sg;
+ msb->current_sg_offset = offset;
+ msb->current_page = 0;
+
+ error = msb_run_state_machine(msb, h_msb_write_block);
+
+ /* The block just written is assumed to have been erased, because
+ its pba was erased earlier. If it actually wasn't, the write
+ still succeeds but can only clear bits, so read the data back
+ and check that it matches what was written. Blocks that we
+ erased ourselves are trusted, unless verify_writes is set */
+ if (!error && (verify_writes ||
+ !test_bit(pba, msb->erased_blocks_bitmap)))
+ error = msb_verify_block(msb, pba, sg, offset);
+
+ if (!error)
+ break;
+
+ if (current_try > 1 || msb_reset(msb, true))
+ break;
+
+ pr_err("write failed, trying to erase the pba %d", pba);
+ error = msb_erase_block(msb, pba);
+ if (error)
+ break;
+
+ current_try++;
+ }
+ return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+ u16 pos;
+ int pba = zone * MS_BLOCKS_IN_ZONE;
+ int i;
+
+ get_random_bytes(&pos, sizeof(pos));
+
+ if (!msb->free_block_count[zone]) {
+ pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ pos %= msb->free_block_count[zone];
+
+ dbg_verbose("have %d choices for a free block, selected randomally: %d",
+ msb->free_block_count[zone], pos);
+
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba);
+ for (i = 0; i < pos; ++i)
+ pba = find_next_zero_bit(msb->used_blocks_bitmap,
+ msb->block_count, pba + 1);
+
+ dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+ if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+ pr_err("BUG: cant get a free block");
+ msb->read_only = true;
+ return MS_BLOCK_INVALID;
+ }
+
+ msb_mark_block_used(msb, pba);
+ return pba;
+}
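+
+/*
+ * Illustrative sketch, not part of the driver: the selection above is
+ * equivalent to taking the pos-th clear bit of the used-blocks bitmap,
+ * scanning from the first block of the zone.  A stand-alone version of
+ * that walk could look like this (the helper name is hypothetical):
+ *
+ *	static unsigned long nth_zero_bit(const unsigned long *map,
+ *		unsigned long size, unsigned long start, unsigned int n)
+ *	{
+ *		unsigned long bit = find_next_zero_bit(map, size, start);
+ *
+ *		while (n-- && bit < size)
+ *			bit = find_next_zero_bit(map, size, bit + 1);
+ *		return bit; // == size if fewer clear bits remain
+ *	}
+ *
+ * Because pos was reduced modulo free_block_count[zone], the result can
+ * only fall outside the zone if the bitmap and the free counters have
+ * gone out of sync, which is why the check above then switches the
+ * card to read-only mode.
+ */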
+
+static int msb_update_block(struct msb_data *msb, u16 lba,
+ struct scatterlist *sg, int offset)
+{
+ u16 pba, new_pba;
+ int error, try;
+
+ pba = msb->lba_to_pba_table[lba];
+ dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
+
+ if (pba != MS_BLOCK_INVALID) {
+ dbg_verbose("setting the update flag on the block");
+ msb_set_overwrite_flag(msb, pba, 0,
+ 0xFF & ~MEMSTICK_OVERWRITE_UDST);
+ }
+
+ for (try = 0; try < 3; try++) {
+ new_pba = msb_get_free_block(msb,
+ msb_get_zone_from_lba(lba));
+
+ if (new_pba == MS_BLOCK_INVALID) {
+ error = -EIO;
+ goto out;
+ }
+
+ dbg_verbose("block update: writing updated block to the pba %d",
+ new_pba);
+ error = msb_write_block(msb, new_pba, lba, sg, offset);
+ if (error == -EBADMSG) {
+ msb_mark_bad(msb, new_pba);
+ continue;
+ }
+
+ if (error)
+ goto out;
+
+ dbg_verbose("block update: erasing the old block");
+ msb_erase_block(msb, pba);
+ msb->lba_to_pba_table[lba] = new_pba;
+ return 0;
+ }
+out:
+ if (error) {
+ pr_err("block update error after %d tries, switching to r/o mode", try);
+ msb->read_only = true;
+ }
+ return error;
+}
+
+/* Converts the boot block to native endianness for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+ p->header.block_id = be16_to_cpu(p->header.block_id);
+ p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+ p->entry.disabled_block.start_addr
+ = be32_to_cpu(p->entry.disabled_block.start_addr);
+ p->entry.disabled_block.data_size
+ = be32_to_cpu(p->entry.disabled_block.data_size);
+ p->entry.cis_idi.start_addr
+ = be32_to_cpu(p->entry.cis_idi.start_addr);
+ p->entry.cis_idi.data_size
+ = be32_to_cpu(p->entry.cis_idi.data_size);
+ p->attr.block_size = be16_to_cpu(p->attr.block_size);
+ p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+ p->attr.number_of_effective_blocks
+ = be16_to_cpu(p->attr.number_of_effective_blocks);
+ p->attr.page_size = be16_to_cpu(p->attr.page_size);
+ p->attr.memory_manufacturer_code
+ = be16_to_cpu(p->attr.memory_manufacturer_code);
+ p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+ p->attr.implemented_capacity
+ = be16_to_cpu(p->attr.implemented_capacity);
+ p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+ p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
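+
+/*
+ * Note on the pattern above: the boot page is read raw from the media,
+ * where every multi-byte field is big-endian, and each field is then
+ * converted in place so the rest of the driver can use native byte
+ * order.  A minimal model of the idiom (the struct is hypothetical):
+ *
+ *	struct example { u16 field; } __packed;
+ *	ex->field = be16_to_cpu(ex->field); // no-op on big-endian CPUs
+ *
+ * Since the conversion is destructive, it must run exactly once per
+ * boot page; msb_read_boot_blocks() below calls it right after a page
+ * is validated.
+ */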
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+ int pba = 0;
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ struct ms_boot_page *page;
+
+ msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+ msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+ msb->boot_block_count = 0;
+
+ dbg_verbose("Start of a scan for the boot blocks");
+
+ if (!msb->boot_page) {
+ page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ msb->boot_page = page;
+ } else
+ page = msb->boot_page;
+
+ msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+ for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
+
+ sg_init_one(&sg, page, sizeof(*page));
+ if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+ dbg("boot scan: can't read pba %d", pba);
+ continue;
+ }
+
+ if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+ dbg("managment flag doesn't indicate boot block %d",
+ pba);
+ continue;
+ }
+
+ if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+ dbg("the pba at %d doesn' contain boot block ID", pba);
+ continue;
+ }
+
+ msb_fix_boot_page_endianness(page);
+ msb->boot_block_locations[msb->boot_block_count] = pba;
+
+ page++;
+ msb->boot_block_count++;
+
+ if (msb->boot_block_count == 2)
+ break;
+ }
+
+ if (!msb->boot_block_count) {
+ pr_err("media doesn't contain master page, aborting");
+ return -EIO;
+ }
+
+ dbg_verbose("End of scan for boot blocks");
+ return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+ struct ms_boot_page *boot_block;
+ struct scatterlist sg;
+ u16 *buffer = NULL;
+ int offset = 0;
+ int i, error = 0;
+ int data_size, data_offset, page, page_offset, size_to_read;
+ u16 pba;
+
+ BUG_ON(block_nr > 1);
+ boot_block = &msb->boot_page[block_nr];
+ pba = msb->boot_block_locations[block_nr];
+
+ if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+ return -EINVAL;
+
+ data_size = boot_block->entry.disabled_block.data_size;
+ data_offset = sizeof(struct ms_boot_page) +
+ boot_block->entry.disabled_block.start_addr;
+ if (!data_size)
+ return 0;
+
+ page = data_offset / msb->page_size;
+ page_offset = data_offset % msb->page_size;
+ size_to_read =
+ DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+ msb->page_size;
+
+ dbg("reading bad block of boot block at pba %d, offset %d len %d",
+ pba, data_offset, data_size);
+
+ buffer = kzalloc(size_to_read, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Read the buffer */
+ sg_init_one(&sg, buffer, size_to_read);
+
+ while (offset < size_to_read) {
+ error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+ if (error)
+ goto out;
+
+ page++;
+ offset += msb->page_size;
+
+ if (page == msb->pages_in_block) {
+ pr_err(
+ "bad block table extends beyond the boot block");
+ break;
+ }
+ }
+
+ /* Process the bad block table */
+ for (i = page_offset / sizeof(u16);
+ i < (page_offset + data_size) / sizeof(u16); i++) {
+
+ u16 bad_block = be16_to_cpu(buffer[i]);
+
+ if (bad_block >= msb->block_count) {
+ dbg("bad block table contains invalid block %d",
+ bad_block);
+ continue;
+ }
+
+ if (test_bit(bad_block, msb->used_blocks_bitmap)) {
+ dbg("duplicate bad block %d in the table",
+ bad_block);
+ continue;
+ }
+
+ dbg("block %d is marked as factory bad", bad_block);
+ msb_mark_block_used(msb, bad_block);
+ }
+out:
+ kfree(buffer);
+ return error;
+}
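+
+/*
+ * Worked example of the offset math above (illustrative values): with
+ * 512-byte pages, sizeof(struct ms_boot_page) == 512, start_addr == 0
+ * and data_size == 6, the table is located as
+ *
+ *	data_offset  = 512 + 0   = 512
+ *	page         = 512 / 512 = 1
+ *	page_offset  = 512 % 512 = 0
+ *	size_to_read = DIV_ROUND_UP(6 + 0, 512) * 512 = 512
+ *
+ * i.e. one whole page right after the boot page is read, and three u16
+ * entries are then decoded from the start of the buffer.
+ */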
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+ int i;
+
+ if (msb->ftl_initialized)
+ return 0;
+
+ msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
+ msb->logical_block_count = msb->zone_count * 496 - 2;
+
+ msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->lba_to_pba_table =
+ kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+ if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+ !msb->erased_blocks_bitmap) {
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->erased_blocks_bitmap);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < msb->zone_count; i++)
+ msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+ memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+ msb->logical_block_count * sizeof(u16));
+
+ dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
+ msb->zone_count, msb->logical_block_count);
+
+ msb->ftl_initialized = true;
+ return 0;
+}
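+
+/*
+ * Capacity sketch with illustrative numbers: a 64 MB stick with
+ * 512-byte pages, 16 pages per block and 8192 physical blocks gives
+ *
+ *	zone_count          = 8192 / 512   = 16
+ *	logical_block_count = 16 * 496 - 2 = 7934
+ *
+ * Each 512-block zone exports 496 logical blocks; the remaining blocks
+ * appear to serve as spares for block updates, and the extra two
+ * account for the boot blocks.
+ */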
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+ u16 pba, lba, other_block;
+ u8 overwrite_flag, management_flag, other_overwrite_flag;
+ int error;
+ struct ms_extra_data_register extra;
+ u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+ if (!overwrite_flags)
+ return -ENOMEM;
+
+ dbg("Start of media scanning");
+ for (pba = 0; pba < msb->block_count; pba++) {
+
+ if (pba == msb->boot_block_locations[0] ||
+ pba == msb->boot_block_locations[1]) {
+ dbg_verbose("pba %05d -> [boot block]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ if (test_bit(pba, msb->used_blocks_bitmap)) {
+ dbg_verbose("pba %05d -> [factory bad]", pba);
+ continue;
+ }
+
+ memset(&extra, 0, sizeof(extra));
+ error = msb_read_oob(msb, pba, 0, &extra);
+
+ /* can't trust the page if we can't read the oob */
+ if (error == -EBADMSG) {
+ pr_notice(
+ "oob of pba %d damaged, will try to erase it", pba);
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ } else if (error) {
+ pr_err("unknown error %d on read of oob of pba %d - aborting",
+ error, pba);
+
+ kfree(overwrite_flags);
+ return error;
+ }
+
+ lba = be16_to_cpu(extra.logical_address);
+ management_flag = extra.management_flag;
+ overwrite_flag = extra.overwrite_flag;
+ overwrite_flags[pba] = overwrite_flag;
+
+ /* Skip bad blocks */
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+ dbg("pba %05d -> [BAD]", pba);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Skip system/drm blocks */
+ if ((management_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
+ MEMSTICK_MANAGMENT_FLAG_NORMAL) {
+ dbg("pba %05d -> [reserved management flag %02x]",
+ pba, management_flag);
+ msb_mark_block_used(msb, pba);
+ continue;
+ }
+
+ /* Erase temporary tables */
+ if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+ dbg("pba %05d -> [temp table] - will erase", pba);
+
+ msb_mark_block_used(msb, pba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ if (lba == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [free]", pba);
+ continue;
+ }
+
+ msb_mark_block_used(msb, pba);
+
+ /* Block has an LBA that doesn't match its zone */
+ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+ pr_notice("pba %05d -> [bad lba %05d] - will erase",
+ pba, lba);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ /* No collisions - great */
+ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+ dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ other_block = msb->lba_to_pba_table[lba];
+ other_overwrite_flag = overwrite_flags[other_block];
+
+ pr_notice("Collision between pba %d and pba %d",
+ pba, other_block);
+
+ if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ pr_notice("pba %d is marked as stable, use it", pba);
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ continue;
+ }
+
+ if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+ pr_notice("pba %d is marked as stable, use it",
+ other_block);
+ msb_erase_block(msb, pba);
+ continue;
+ }
+
+ pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
+ pba, other_block, other_block);
+
+ msb_erase_block(msb, other_block);
+ msb->lba_to_pba_table[lba] = pba;
+ }
+
+ dbg("End of media scanning");
+ kfree(overwrite_flags);
+ return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+ struct msb_data *msb = (struct msb_data *)data;
+ msb->need_flush_cache = true;
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return;
+
+ del_timer_sync(&msb->cache_flush_timer);
+
+ dbg_verbose("Discarding the write cache");
+ msb->cache_block_lba = MS_BLOCK_INVALID;
+ bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+ setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+ (unsigned long)msb);
+
+ if (!msb->cache)
+ msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->cache)
+ return -ENOMEM;
+
+ msb_cache_discard(msb);
+ return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+ struct scatterlist sg;
+ struct ms_extra_data_register extra;
+ int page, offset, error;
+ u16 pba, lba;
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID)
+ return 0;
+
+ lba = msb->cache_block_lba;
+ pba = msb->lba_to_pba_table[lba];
+
+ dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+ pba, msb->cache_block_lba);
+
+ sg_init_one(&sg, msb->cache, msb->block_size);
+
+ /* Read all missing pages in cache */
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ offset = page * msb->page_size;
+
+ dbg_verbose("reading non-present sector %d of cache block %d",
+ page, lba);
+ error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+ /* Pages that can't be read are skipped here and later marked as damaged (00 page status) */
+ if (error == -EBADMSG) {
+ pr_err("read error on sector %d, contents probably damaged", page);
+ continue;
+ }
+
+ if (error)
+ return error;
+
+ if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+ MEMSTICK_OV_PG_NORMAL) {
+ dbg("page %d is marked as bad", page);
+ continue;
+ }
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ }
+
+ /* Write the cache now */
+ error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+ pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+ /* Mark invalid pages */
+ if (!error) {
+ for (page = 0; page < msb->pages_in_block; page++) {
+
+ if (test_bit(page, &msb->valid_cache_bitmap))
+ continue;
+
+ dbg("marking page %d as containing damaged data",
+ page);
+ msb_set_overwrite_flag(msb,
+ pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+ }
+ }
+
+ msb_cache_discard(msb);
+ return error;
+}
+
+static int msb_cache_write(struct msb_data *msb, int lba,
+ int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+ int error;
+ struct scatterlist sg_tmp[10];
+
+ if (msb->read_only)
+ return -EROFS;
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID ||
+ lba != msb->cache_block_lba)
+ if (add_to_cache_only)
+ return 0;
+
+ /* If we need to write different block */
+ if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+ lba != msb->cache_block_lba) {
+ dbg_verbose("first flush the cache");
+ error = msb_cache_flush(msb);
+ if (error)
+ return error;
+ }
+
+ if (msb->cache_block_lba == MS_BLOCK_INVALID) {
+ msb->cache_block_lba = lba;
+ mod_timer(&msb->cache_flush_timer,
+ jiffies + msecs_to_jiffies(cache_flush_timeout));
+ }
+
+ dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+ sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + page * msb->page_size, msb->page_size);
+
+ set_bit(page, &msb->valid_cache_bitmap);
+ return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, int offset)
+{
+ int pba = msb->lba_to_pba_table[lba];
+ struct scatterlist sg_tmp[10];
+ int error = 0;
+
+ if (lba == msb->cache_block_lba &&
+ test_bit(page, &msb->valid_cache_bitmap)) {
+
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+ lba, pba, page);
+
+ sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+ msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
+ offset, msb->page_size);
+ sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+ msb->cache + msb->page_size * page,
+ msb->page_size);
+ } else {
+ dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+ lba, pba, page);
+
+ error = msb_read_page(msb, pba, page, NULL, sg, offset);
+ if (error)
+ return error;
+
+ msb_cache_write(msb, lba, page, true, sg, offset);
+ }
+ return error;
+}
+
+/* Emulated geometry table
+ * The exact contents of this table aren't that important;
+ * different values could be used, provided that they still
+ * cover the whole disk.
+ * The 64 MB entry is what Windows reports for my 64M memstick */
+
+static const struct chs_entry chs_table[] = {
+/* size sectors cylinders heads */
+ { 4, 16, 247, 2 },
+ { 8, 16, 495, 2 },
+ { 16, 16, 495, 4 },
+ { 32, 16, 991, 4 },
+ { 64, 16, 991, 8 },
+ {128, 16, 991, 16 },
+ { 0 }
+};
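+
+/*
+ * Sanity check with the 64 MB entry (illustrative arithmetic):
+ *
+ *	16 sectors * 991 cylinders * 8 heads = 126848 sectors
+ *	126848 * 512 bytes = 64946176 bytes (about 62 MiB)
+ *
+ * The exact values aren't critical: the capacity actually exported to
+ * the block layer comes from set_capacity() in msb_init_disk(), and
+ * this table only serves HDIO_GETGEO style geometry queries.
+ */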
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ struct ms_boot_page *boot_block;
+ int error = 0, i, raw_size_in_megs;
+
+ msb->caps = 0;
+
+ if (card->id.class >= MEMSTICK_CLASS_ROM &&
+ card->id.class <= MEMSTICK_CLASS_WP)
+ msb->read_only = true;
+
+ msb->state = -1;
+ error = msb_reset(msb, false);
+ if (error)
+ return error;
+
+ /* Due to a bug in the JMicron driver written by Alex Dubov,
+ serial mode barely works, so we switch to parallel mode
+ right away */
+ if (host->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+
+ msb->page_size = sizeof(struct ms_boot_page);
+
+ /* Read the boot page */
+ error = msb_read_boot_blocks(msb);
+ if (error)
+ return -EIO;
+
+ boot_block = &msb->boot_page[0];
+
+ /* Save interesting attributes from the boot page */
+ msb->block_count = boot_block->attr.number_of_blocks;
+ msb->page_size = boot_block->attr.page_size;
+
+ msb->pages_in_block = boot_block->attr.block_size * 2;
+ msb->block_size = msb->page_size * msb->pages_in_block;
+
+ if (msb->page_size > PAGE_SIZE) {
+ /* this isn't supported by Linux at all, anyway */
+ dbg("device page size %d isn't supported", msb->page_size);
+ return -EINVAL;
+ }
+
+ msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+ if (!msb->block_buffer)
+ return -ENOMEM;
+
+ raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+ for (i = 0; chs_table[i].size; i++) {
+
+ if (chs_table[i].size != raw_size_in_megs)
+ continue;
+
+ msb->geometry.cylinders = chs_table[i].cyl;
+ msb->geometry.heads = chs_table[i].head;
+ msb->geometry.sectors = chs_table[i].sec;
+ break;
+ }
+
+ if (boot_block->attr.transfer_supporting == 1)
+ msb->caps |= MEMSTICK_CAP_PAR4;
+
+ if (boot_block->attr.device_type & 0x03)
+ msb->read_only = true;
+
+ dbg("Total block count = %d", msb->block_count);
+ dbg("Each block consists of %d pages", msb->pages_in_block);
+ dbg("Page size = %d bytes", msb->page_size);
+ dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+ dbg("Read only: %d", msb->read_only);
+
+#if 0
+ /* Now we can switch the interface */
+ if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+ msb_switch_to_parallel(msb);
+#endif
+
+ error = msb_cache_init(msb);
+ if (error)
+ return error;
+
+ error = msb_ftl_initialize(msb);
+ if (error)
+ return error;
+
+ /* Read the bad block table */
+ error = msb_read_bad_block_table(msb, 0);
+
+ if (error && error != -ENOMEM) {
+ dbg("failed to read bad block table from primary boot block, trying from backup");
+ error = msb_read_bad_block_table(msb, 1);
+ }
+
+ if (error)
+ return error;
+
+ /* *drum roll* Scan the media */
+ error = msb_ftl_scan(msb);
+ if (error) {
+ pr_err("Scan of media failed");
+ return error;
+ }
+
+ return 0;
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, size_t len, int *successfully_written)
+{
+ int error = 0;
+ off_t offset = 0;
+ *successfully_written = 0;
+
+ while (offset < len) {
+ if (page == 0 && len - offset >= msb->block_size) {
+
+ if (msb->cache_block_lba == lba)
+ msb_cache_discard(msb);
+
+ dbg_verbose("Writing whole lba %d", lba);
+ error = msb_update_block(msb, lba, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->block_size;
+ *successfully_written += msb->block_size;
+ lba++;
+ continue;
+ }
+
+ error = msb_cache_write(msb, lba, page, false, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+ *successfully_written += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+ int page, struct scatterlist *sg, int len, int *successfully_read)
+{
+ int error = 0;
+ int offset = 0;
+ *successfully_read = 0;
+
+ while (offset < len) {
+
+ error = msb_cache_read(msb, lba, page, sg, offset);
+ if (error)
+ return error;
+
+ offset += msb->page_size;
+ *successfully_read += msb->page_size;
+
+ page++;
+ if (page == msb->pages_in_block) {
+ page = 0;
+ lba++;
+ }
+ }
+ return 0;
+}
+
+static void msb_io_work(struct work_struct *work)
+{
+ struct msb_data *msb = container_of(work, struct msb_data, io_work);
+ int page, error, len;
+ sector_t lba;
+ unsigned long flags;
+ struct scatterlist *sg = msb->prealloc_sg;
+
+ dbg_verbose("IO: work started");
+
+ while (1) {
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (msb->need_flush_cache) {
+ msb->need_flush_cache = false;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ msb_cache_flush(msb);
+ continue;
+ }
+
+ if (!msb->req) {
+ msb->req = blk_fetch_request(msb->queue);
+ if (!msb->req) {
+ dbg_verbose("IO: no more requests exiting");
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* If card was removed meanwhile */
+ if (!msb->req)
+ return;
+
+ /* process the request */
+ dbg_verbose("IO: processing new request");
+ blk_rq_map_sg(msb->queue, msb->req, sg);
+
+ lba = blk_rq_pos(msb->req);
+
+ sector_div(lba, msb->page_size / 512);
+ page = do_div(lba, msb->pages_in_block);
+
+ if (rq_data_dir(msb->req) == READ)
+ error = msb_do_read_request(msb, lba, page, sg,
+ blk_rq_bytes(msb->req), &len);
+ else
+ error = msb_do_write_request(msb, lba, page, sg,
+ blk_rq_bytes(msb->req), &len);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+
+ if (len)
+ if (!__blk_end_request(msb->req, 0, len))
+ msb->req = NULL;
+
+ if (error && msb->req) {
+ dbg_verbose("IO: ending one sector of the request with error");
+ if (!__blk_end_request(msb->req, error, msb->page_size))
+ msb->req = NULL;
+ }
+
+ if (msb->req)
+ dbg_verbose("IO: request still pending");
+
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ }
+}
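+
+/*
+ * Worked example of the request translation above, with illustrative
+ * values of a 2048-byte page and 16 pages per block:
+ *
+ *	blk_rq_pos(req) = 1000			(512-byte sectors)
+ *	sector_div(lba, 2048 / 512)	->	lba = 250 (pages)
+ *	page = do_div(lba, 16)		->	lba = 15, page = 10
+ *
+ * so sector 1000 maps to page 10 of logical block 15; the read/write
+ * helpers then advance page by page, wrapping to the next lba once
+ * page reaches pages_in_block.
+ */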
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct msb_data *msb = disk->private_data;
+
+ dbg_verbose("block device open");
+
+ mutex_lock(&msb_disk_lock);
+
+ if (msb && msb->card)
+ msb->usage_count++;
+
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+static void msb_data_clear(struct msb_data *msb)
+{
+ kfree(msb->boot_page);
+ kfree(msb->used_blocks_bitmap);
+ kfree(msb->lba_to_pba_table);
+ kfree(msb->cache);
+ msb->card = NULL;
+}
+
+static int msb_disk_release(struct gendisk *disk)
+{
+ struct msb_data *msb = disk->private_data;
+
+ dbg_verbose("block device release");
+ mutex_lock(&msb_disk_lock);
+
+ if (msb) {
+ if (msb->usage_count)
+ msb->usage_count--;
+
+ if (!msb->usage_count) {
+ disk->private_data = NULL;
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ put_disk(disk);
+ kfree(msb);
+ }
+ }
+ mutex_unlock(&msb_disk_lock);
+ return 0;
+}
+
+static void msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+ msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ struct msb_data *msb = bdev->bd_disk->private_data;
+ *geo = msb->geometry;
+ return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+ if (req->cmd_type != REQ_TYPE_FS &&
+ req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ blk_dump_rq_flags(req, "MS unsupported request");
+ return BLKPREP_KILL;
+ }
+ req->cmd_flags |= REQ_DONTPREP;
+ return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+ struct memstick_dev *card = q->queuedata;
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct request *req = NULL;
+
+ dbg_verbose("Submit request");
+
+ if (msb->card_dead) {
+ dbg("Refusing requests on removed card");
+
+ WARN_ON(!msb->io_queue_stopped);
+
+ while ((req = blk_fetch_request(q)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ return;
+ }
+
+ if (msb->req)
+ return;
+
+ if (!msb->io_queue_stopped)
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ return (msb->card_dead == 0);
+}
+
+static void msb_stop(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Stopping all msblock IO");
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_stop_queue(msb->queue);
+ msb->io_queue_stopped = true;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ del_timer_sync(&msb->cache_flush_timer);
+ flush_workqueue(msb->io_queue);
+
+ if (msb->req) {
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_requeue_request(msb->queue, msb->req);
+ msb->req = NULL;
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ }
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ dbg("Resuming IO from msblock");
+
+ msb_invalidate_reg_window(msb);
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ if (!msb->io_queue_stopped || msb->card_dead) {
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* Kick the cache flush anyway, it's harmless */
+ msb->need_flush_cache = true;
+ msb->io_queue_stopped = false;
+
+ spin_lock_irqsave(&msb->q_lock, flags);
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ queue_work(msb->io_queue, &msb->io_work);
+}
+
+static const struct block_device_operations msb_bdops = {
+ .open = msb_bd_open,
+ .release = msb_bd_release,
+ .getgeo = msb_bd_getgeo,
+ .owner = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct memstick_host *host = card->host;
+ int rc;
+ u64 limit = BLK_BOUNCE_HIGH;
+ unsigned long capacity;
+
+ if (host->dev.dma_mask && *(host->dev.dma_mask))
+ limit = *(host->dev.dma_mask);
+
+ mutex_lock(&msb_disk_lock);
+ msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
+ mutex_unlock(&msb_disk_lock);
+
+ if (msb->disk_id < 0)
+ return msb->disk_id;
+
+ msb->disk = alloc_disk(0);
+ if (!msb->disk) {
+ rc = -ENOMEM;
+ goto out_release_id;
+ }
+
+ msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+ if (!msb->queue) {
+ rc = -ENOMEM;
+ goto out_put_disk;
+ }
+
+ msb->queue->queuedata = card;
+ blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+ blk_queue_bounce_limit(msb->queue, limit);
+ blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+ blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+ blk_queue_max_segment_size(msb->queue,
+ MS_BLOCK_MAX_PAGES * msb->page_size);
+ blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+ sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+ msb->disk->fops = &msb_bdops;
+ msb->disk->private_data = msb;
+ msb->disk->queue = msb->queue;
+ msb->disk->driverfs_dev = &card->dev;
+ msb->disk->flags |= GENHD_FL_EXT_DEVT;
+
+ capacity = msb->pages_in_block * msb->logical_block_count;
+ capacity *= (msb->page_size / 512);
+ set_capacity(msb->disk, capacity);
+ dbg("Set total disk size to %lu sectors", capacity);
+
+ msb->usage_count = 1;
+ msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+ INIT_WORK(&msb->io_work, msb_io_work);
+ sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb->read_only)
+ set_disk_ro(msb->disk, 1);
+
+ msb_start(card);
+ add_disk(msb->disk);
+ dbg("Disk added");
+ return 0;
+
+out_put_disk:
+ put_disk(msb->disk);
+out_release_id:
+ mutex_lock(&msb_disk_lock);
+ idr_remove(&msb_disk_idr, msb->disk_id);
+ mutex_unlock(&msb_disk_lock);
+ return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+ struct msb_data *msb;
+ int rc = 0;
+
+ msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!msb)
+ return -ENOMEM;
+ memstick_set_drvdata(card, msb);
+ msb->card = card;
+ spin_lock_init(&msb->q_lock);
+
+ rc = msb_init_card(card);
+ if (rc)
+ goto out_free;
+
+ rc = msb_init_disk(card);
+ if (!rc) {
+ card->check = msb_check_card;
+ card->stop = msb_stop;
+ card->start = msb_start;
+ return 0;
+ }
+out_free:
+ memstick_set_drvdata(card, NULL);
+ msb_data_clear(msb);
+ kfree(msb);
+ return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ unsigned long flags;
+
+ if (!msb->io_queue_stopped)
+ msb_stop(card);
+
+ dbg("Removing the disk device");
+
+ /* Take care of unhandled + new requests from now on */
+ spin_lock_irqsave(&msb->q_lock, flags);
+ msb->card_dead = true;
+ blk_start_queue(msb->queue);
+ spin_unlock_irqrestore(&msb->q_lock, flags);
+
+ /* Remove the disk */
+ del_gendisk(msb->disk);
+ blk_cleanup_queue(msb->queue);
+ msb->queue = NULL;
+
+ mutex_lock(&msb_disk_lock);
+ msb_data_clear(msb);
+ mutex_unlock(&msb_disk_lock);
+
+ msb_disk_release(msb->disk);
+ memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+ msb_stop(card);
+ return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+ struct msb_data *msb = memstick_get_drvdata(card);
+ struct msb_data *new_msb = NULL;
+ bool card_dead = true;
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+ msb->card_dead = true;
+ return 0;
+#endif
+ mutex_lock(&card->host->lock);
+
+ new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+ if (!new_msb)
+ goto out;
+
+ new_msb->card = card;
+ memstick_set_drvdata(card, new_msb);
+ spin_lock_init(&new_msb->q_lock);
+ sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+ if (msb_init_card(card))
+ goto out;
+
+ if (msb->block_size != new_msb->block_size)
+ goto out;
+
+ if (memcmp(msb->boot_page, new_msb->boot_page,
+ sizeof(struct ms_boot_page)))
+ goto out;
+
+ if (msb->logical_block_count != new_msb->logical_block_count ||
+ memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+ msb->logical_block_count))
+ goto out;
+
+ if (msb->block_count != new_msb->block_count ||
+ memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count / 8))
+ goto out;
+
+ card_dead = false;
+out:
+ if (card_dead)
+ dbg("Card was removed/replaced during suspend");
+
+ msb->card_dead = card_dead;
+ memstick_set_drvdata(card, msb);
+
+ if (new_msb) {
+ msb_data_clear(new_msb);
+ kfree(new_msb);
+ }
+
+ msb_start(card);
+ mutex_unlock(&card->host->lock);
+ return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_FLASH},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_ROM},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_RO},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+ MEMSTICK_CLASS_WP},
+
+ {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+ MEMSTICK_CLASS_DUO},
+ {}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+static struct memstick_driver msb_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = msb_id_tbl,
+ .probe = msb_probe,
+ .remove = msb_remove,
+ .suspend = msb_suspend,
+ .resume = msb_resume
+};
+
+static int major;
+
+static int __init msb_init(void)
+{
+ int rc = register_blkdev(0, DRIVER_NAME);
+
+ if (rc < 0) {
+ pr_err("failed to register major (error %d)\n", rc);
+ return rc;
+ }
+
+ major = rc;
+ rc = memstick_register_driver(&msb_driver);
+ if (rc) {
+ unregister_blkdev(major, DRIVER_NAME);
+ pr_err("failed to register memstick driver (error %d)\n", rc);
+ }
+
+ return rc;
+}
+
+static void __exit msb_exit(void)
+{
+ memstick_unregister_driver(&msb_driver);
+ unregister_blkdev(major, DRIVER_NAME);
+ idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+ "Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644
index 00000000000..96e63755098
--- /dev/null
+++ b/drivers/memstick/core/ms_block.h
@@ -0,0 +1,290 @@
+/*
+ * ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * The MS structures were also copied from an old, broken driver by the
+ * same author; they probably come from the MS spec.
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS 32
+#define MS_BLOCK_MAX_PAGES ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID 0x0001
+#define MS_BLOCK_INVALID 0xffff
+#define MS_MAX_ZONES 16
+#define MS_BLOCKS_IN_ZONE 512
+
+#define MS_BLOCK_MAP_LINE_SZ 16
+#define MS_BLOCK_PART_SHIFT 3
+
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+ MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+ MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | \
+ MEMSTICK_OVERWRITE_PGST0 | \
+ MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+ (MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+ (MEMSTICK_MANAGEMENT_SYSFLG | \
+ MEMSTICK_MANAGEMENT_SCMS1 | \
+ MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+ unsigned short block_id;
+ unsigned short format_reserved;
+ unsigned char reserved0[184];
+ unsigned char data_entry;
+ unsigned char reserved1[179];
+} __packed;
+
+
+struct ms_system_item {
+ unsigned int start_addr;
+ unsigned int data_size;
+ unsigned char data_type_id;
+ unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+ struct ms_system_item disabled_block;
+ struct ms_system_item cis_idi;
+ unsigned char reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+ unsigned char memorystick_class;
+ unsigned char format_unique_value1;
+ unsigned short block_size;
+ unsigned short number_of_blocks;
+ unsigned short number_of_effective_blocks;
+ unsigned short page_size;
+ unsigned char extra_data_size;
+ unsigned char format_unique_value2;
+ unsigned char assembly_time[8];
+ unsigned char format_unique_value3;
+ unsigned char serial_number[3];
+ unsigned char assembly_manufacturer_code;
+ unsigned char assembly_model_code[3];
+ unsigned short memory_manufacturer_code;
+ unsigned short memory_device_code;
+ unsigned short implemented_capacity;
+ unsigned char format_unique_value4[2];
+ unsigned char vcc;
+ unsigned char vpp;
+ unsigned short controller_number;
+ unsigned short controller_function;
+ unsigned char reserved0[9];
+ unsigned char transfer_supporting;
+ unsigned short format_unique_value5;
+ unsigned char format_type;
+ unsigned char memorystick_application;
+ unsigned char device_type;
+ unsigned char reserved1[22];
+ unsigned char format_unique_value6[2];
+ unsigned char reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+ unsigned short general_config;
+ unsigned short logical_cylinders;
+ unsigned short reserved0;
+ unsigned short logical_heads;
+ unsigned short track_size;
+ unsigned short page_size;
+ unsigned short pages_per_track;
+ unsigned short msw;
+ unsigned short lsw;
+ unsigned short reserved1;
+ unsigned char serial_number[20];
+ unsigned short buffer_type;
+ unsigned short buffer_size_increments;
+ unsigned short long_command_ecc;
+ unsigned char firmware_version[28];
+ unsigned char model_name[18];
+ unsigned short reserved2[5];
+ unsigned short pio_mode_number;
+ unsigned short dma_mode_number;
+ unsigned short field_validity;
+ unsigned short current_logical_cylinders;
+ unsigned short current_logical_heads;
+ unsigned short current_pages_per_track;
+ unsigned int current_page_capacity;
+ unsigned short multiple_page_setting;
+ unsigned int addressable_pages;
+ unsigned short single_word_dma;
+ unsigned short multi_word_dma;
+ unsigned char reserved3[128];
+} __packed;
+
+
+struct ms_boot_page {
+ struct ms_boot_header header;
+ struct ms_system_entry entry;
+ struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+ unsigned int usage_count;
+ struct memstick_dev *card;
+ struct gendisk *disk;
+ struct request_queue *queue;
+ spinlock_t q_lock;
+ struct hd_geometry geometry;
+ struct attribute_group attr_group;
+ struct request *req;
+ int caps;
+ int disk_id;
+
+ /* IO */
+ struct workqueue_struct *io_queue;
+ bool io_queue_stopped;
+ struct work_struct io_work;
+ bool card_dead;
+
+ /* Media properties */
+ struct ms_boot_page *boot_page;
+ u16 boot_block_locations[2];
+ int boot_block_count;
+
+ bool read_only;
+ unsigned short page_size;
+ int block_size;
+ int pages_in_block;
+ int zone_count;
+ int block_count;
+ int logical_block_count;
+
+ /* FTL tables */
+ unsigned long *used_blocks_bitmap;
+ unsigned long *erased_blocks_bitmap;
+ u16 *lba_to_pba_table;
+ int free_block_count[MS_MAX_ZONES];
+ bool ftl_initialized;
+
+ /* Cache */
+ unsigned char *cache;
+ unsigned long valid_cache_bitmap;
+ int cache_block_lba;
+ bool need_flush_cache;
+ struct timer_list cache_flush_timer;
+
+ /* Preallocated buffers */
+ unsigned char *block_buffer;
+ struct scatterlist prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+
+ /* handler's local data */
+ struct ms_register_addr reg_addr;
+ bool addr_valid;
+
+ u8 command_value;
+ bool command_need_oob;
+ struct scatterlist *current_sg;
+ int current_sg_offset;
+
+ struct ms_register regs;
+ int current_page;
+
+ int state;
+ int exit_error;
+ bool int_polling;
+ unsigned long int_timeout;
+};
+
+enum msb_readpage_states {
+ MSB_RP_SEND_BLOCK_ADDRESS = 0,
+ MSB_RP_SEND_READ_COMMAND,
+
+ MSB_RP_SEND_INT_REQ,
+ MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+ MSB_RP_SEND_READ_STATUS_REG,
+ MSB_RP_RECIVE_STATUS_REG,
+
+ MSB_RP_SEND_OOB_READ,
+ MSB_RP_RECEIVE_OOB_READ,
+
+ MSB_RP_SEND_READ_DATA,
+ MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+ MSB_WB_SEND_WRITE_PARAMS = 0,
+ MSB_WB_SEND_WRITE_OOB,
+ MSB_WB_SEND_WRITE_COMMAND,
+
+ MSB_WB_SEND_INT_REQ,
+ MSB_WB_RECEIVE_INT_REQ,
+
+ MSB_WB_SEND_WRITE_DATA,
+ MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+ MSB_SC_SEND_WRITE_PARAMS,
+ MSB_SC_SEND_WRITE_OOB,
+ MSB_SC_SEND_COMMAND,
+
+ MSB_SC_SEND_INT_REQ,
+ MSB_SC_RECEIVE_INT_REQ,
+};
+
+enum msb_reset_states {
+ MSB_RS_SEND,
+ MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+ MSB_PS_SEND_SWITCH_COMMAND,
+ MSB_PS_SWICH_HOST,
+ MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned char sec;
+ unsigned short cyl;
+ unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+ struct memstick_request **mrq);
+
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ pr_err(format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
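+
+/*
+ * Example (illustrative): with the module parameter debug=2,
+ *
+ *	dbg_verbose("erasing pba %d", pba);
+ *
+ * expands to pr_err("erasing pba %d\n", pba).  The level check happens
+ * at run time, so debug can be changed through sysfs (S_IWUSR) without
+ * reloading the module.
+ */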
+
+#endif
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index cf8bd727dfc..25f8f93decb 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -612,8 +612,6 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
memstick_remove_host(msh);
memstick_free_host(msh);
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&(pdev->dev),
": Realtek PCI-E Memstick controller has been removed\n");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cd0b7f4a1ff..1a3163f1407 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -812,7 +812,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err)
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -850,6 +850,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
*ecc_err = 1;
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -869,6 +879,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_ABORT;
if (stop_status & R1_CARD_ECC_FAILED)
*ecc_err = 1;
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if (stop_status & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ stop_status);
+ *gen_err = 1;
+ }
}
/* Check for set block count errors */
@@ -1097,7 +1114,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
- int ecc_err = 0;
+ int ecc_err = 0, gen_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -1111,7 +1128,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
*/
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -1143,6 +1160,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
u32 status;
unsigned long timeout;
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
+
timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
do {
int err = get_card_status(card, &status, 5);
@@ -1152,6 +1177,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_CMD_ERR;
}
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: general error sending status command, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ status);
+ gen_err = 1;
+ }
+
/* Timeout if the device never becomes ready for data
* and never leaves the program state.
*/
@@ -1171,6 +1203,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
(R1_CURRENT_STATE(status) == R1_STATE_PRG));
}
+ /* if general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
+ }
+
if (brq->data.error) {
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->data.error,
@@ -2191,10 +2230,10 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* is freeing the queue that stops new requests
* from being accepted.
*/
+ card = md->queue.card;
mmc_cleanup_queue(&md->queue);
if (md->flags & MMC_BLK_PACKED_CMD)
mmc_packed_clean(&md->queue);
- card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index a69df521627..0c0fc52d42c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
struct seq_file *sf = (struct seq_file *)file->private_data;
struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- char lbuf[12];
long testcase;
+ int ret;
- if (count >= sizeof(lbuf))
- return -EINVAL;
-
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- lbuf[count] = '\0';
-
- if (strict_strtol(lbuf, 10, &testcase))
- return -EINVAL;
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5d088551196..bf18b6bfce4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,7 @@
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -1196,6 +1197,49 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
+#ifdef CONFIG_OF
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node need to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * 1. Return zero on success.
+ * 2. Return negative errno: voltage-range is invalid.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ if (!voltage_ranges) {
+ pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ return -EINVAL;
+ }
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!num_ranges) {
+ pr_info("%s: voltage-ranges empty\n", np->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%s: voltage-range #%d is invalid\n",
+ np->full_name, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+#endif /* CONFIG_OF */
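+
+/*
+ * Typical use from a host driver (sketch; the variable names are
+ * hypothetical):
+ *
+ *	u32 ocr_mask = 0;
+ *
+ *	if (!mmc_of_parse_voltage(pdev->dev.of_node, &ocr_mask))
+ *		host->ocr_avail = ocr_mask;
+ *
+ * paired with a device-tree property such as
+ *
+ *	voltage-ranges = <3300 3300>;	// one 3.3 V range, in millivolts
+ *
+ * where each <min max> pair is converted via mmc_vddrange_to_ocrmask().
+ */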
+
#ifdef CONFIG_REGULATOR
/**
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6fb6f77450c..49bc403e31f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -374,7 +374,7 @@ int mmc_of_parse(struct mmc_host *host)
if (!(flags & OF_GPIO_ACTIVE_LOW))
gpio_inv_cd = true;
- ret = mmc_gpio_request_cd(host, gpio);
+ ret = mmc_gpio_request_cd(host, gpio, 0);
if (ret < 0) {
dev_err(host->parent,
"Failed to request CD GPIO #%d: %d!\n",
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 837fc7386e2..ef183483d5b 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -531,6 +531,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 176d125f5b5..5e8823dc3ef 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -215,7 +215,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i;
+ int err, i, max_au;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -239,12 +239,15 @@ static int mmc_read_ssr(struct mmc_card *card)
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
+ /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+ max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 && au <= 9) {
+ if (au > 0 && au <= max_au) {
card->ssr.au = 1 << (au + 4);
es = UNSTUFF_BITS(ssr, 408 - 384, 16);
et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -942,13 +945,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
- return err;
+ goto free_card;
}
if (!oldcard) {
err = mmc_sd_get_csd(host, card);
if (err)
- return err;
+ goto free_card;
mmc_decode_cid(card);
}
@@ -959,7 +962,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
- return err;
+ goto free_card;
}
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 32423510551..46596b71a32 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -135,6 +135,7 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
* @gpio: gpio number requested
+ * @debounce: debounce time in microseconds
*
* As devm_* managed functions are used in mmc_gpio_request_cd(), client
* drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
@@ -143,9 +144,14 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* switching for card-detection, they are responsible for calling
* mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
*
+ * If GPIO debouncing is desired, set the debounce parameter to a non-zero
+ * value. The caller is responsible for ensuring that the GPIO driver associated
+ * with the GPIO supports debouncing, otherwise an error will be returned.
+ *
* Returns zero on success, else an error.
*/
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce)
{
struct mmc_gpio *ctx;
int irq = gpio_to_irq(gpio);
@@ -167,6 +173,12 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
*/
return ret;
+ if (debounce) {
+ ret = gpio_set_debounce(gpio, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Even if gpio_to_irq() returns a valid IRQ number, the platform might
* still prefer to poll, e.g., because that IRQ number is already used
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8a4c066787d..b7fd5ab80a4 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -284,11 +284,11 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
This selects the TI OMAP High Speed Multimedia card Interface.
- If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
- Multimedia Card slot, say Y or M here.
+ If you have an omap2plus board with a Multimedia Card slot,
+ say Y or M here.
If unsure, say N.
@@ -530,7 +530,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
- depends on ARM
+ depends on ARC || ARM
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
@@ -569,7 +569,7 @@ config MMC_DW_EXYNOS
config MMC_DW_SOCFPGA
tristate "SOCFPGA specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW
+ depends on MMC_DW && MFD_SYSCON
select MMC_DW_PLTFM
help
This selects support for Altera SoCFPGA specific extensions to the
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d422e2167e1..c41d0c36450 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -52,8 +52,6 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bdb84da7495..69e438ee043 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -378,6 +378,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
u32 *buf;
+ int ret = 0;
+
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
@@ -388,12 +390,16 @@ static int atmci_regs_show(struct seq_file *s, void *v)
* not disabling interrupts, so IMR and SR may not be
* consistent.
*/
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto out;
+
spin_lock_bh(&host->lock);
- clk_enable(host->mck);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
- clk_disable(host->mck);
spin_unlock_bh(&host->lock);
+ clk_disable_unprepare(host->mck);
+
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
@@ -442,9 +448,10 @@ static int atmci_regs_show(struct seq_file *s, void *v)
val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
+out:
kfree(buf);
- return 0;
+ return ret;
}
static int atmci_regs_open(struct inode *inode, struct file *file)
@@ -1262,6 +1269,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
unsigned int i;
+ bool unprepare_clk;
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
@@ -1277,9 +1285,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned int clock_min = ~0U;
u32 clkdiv;
+ clk_prepare(host->mck);
+ unprepare_clk = true;
+
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
clk_enable(host->mck);
+ unprepare_clk = false;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
if (host->caps.has_cfg_reg)
@@ -1347,6 +1359,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
} else {
bool any_slot_active = false;
+ unprepare_clk = false;
+
spin_lock_bh(&host->lock);
slot->clock = 0;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
@@ -1360,12 +1374,16 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mode_reg) {
atmci_readl(host, ATMCI_MR);
clk_disable(host->mck);
+ unprepare_clk = true;
}
host->mode_reg = 0;
}
spin_unlock_bh(&host->lock);
}
+ if (unprepare_clk)
+ clk_unprepare(host->mck);
+
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
@@ -2376,10 +2394,12 @@ static int __init atmci_probe(struct platform_device *pdev)
if (!host->regs)
goto err_ioremap;
- clk_enable(host->mck);
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto err_request_irq;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
host->mapbase = regs->start;
@@ -2482,11 +2502,11 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_cleanup_slot(host->slot[i], i);
}
- clk_enable(host->mck);
+ clk_prepare_enable(host->mck);
atmci_writel(host, ATMCI_IDR, ~0UL);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
if (host->dma.chan)
dma_release_channel(host->dma.chan);
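
The conversions above follow the common clock framework's pairing rules, which
is also why set_ios() needs the unprepare_clk bookkeeping; a minimal sketch of
the constraint:

	/* clk_prepare()/clk_unprepare() may sleep and must stay outside the
	 * spinlock; clk_enable()/clk_disable() are atomic-safe. For fully
	 * sleepable paths, clk_prepare_enable() bundles both steps.
	 */
	ret = clk_prepare_enable(host->mck);
	if (ret)
		return ret;
	spin_lock_bh(&host->lock);
	/* ... register access with the clock running ... */
	spin_unlock_bh(&host->lock);
	clk_disable_unprepare(host->mck);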
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 866edef2e82..6a1fa2110a0 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -39,6 +39,7 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
+ DW_MCI_TYPE_EXYNOS5420,
};
/* Exynos implementation specific driver private data */
@@ -62,6 +63,9 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos5250-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5250,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420,
},
};
@@ -90,7 +94,8 @@ static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
host->bus_hz /= (priv->ciu_div + 1);
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
@@ -173,6 +178,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5250-dw-mshc",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc",
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index b456b0c3523..f70546a3a7c 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,9 @@ static int dw_mci_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- host->regs = pcim_iomap_table(pdev)[0];
+ host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];
+
+ pci_set_master(pdev);
ret = dw_mci_probe(host);
if (ret)
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ee525565aa7..20897529ea5 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
{
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 542407363dd..018f365e5ae 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1601,18 +1601,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
- if (pending) {
-
- /*
- * DTO fix - version 2.10a and below, and only if internal DMA
- * is configured.
- */
- if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
- if (!pending &&
- ((mci_readl(host, STATUS) >> 17) & 0x1fff))
- pending |= SDMMC_INT_DATA_OVER;
- }
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+ if (pending) {
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0308c9f1cf5..66516339e3a 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -713,7 +713,7 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
if (gpio_is_valid(pdata->gpio_card_detect)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect);
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
if (ret)
return ret;
}
@@ -783,9 +783,8 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(&pdev->dev, res);
- if (!host->base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto err_free_host;
}
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74145d1d51f..0a87e569134 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -36,6 +36,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
+#include <linux/mmc/slot-gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
@@ -1272,33 +1273,11 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
-static int mmc_spi_get_ro(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc->parent);
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-static int mmc_spi_get_cd(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_cd)
- return !!host->pdata->get_cd(mmc->parent);
- return -ENOSYS;
-}
-
static const struct mmc_host_ops mmc_spi_ops = {
.request = mmc_spi_request,
.set_ios = mmc_spi_set_ios,
- .get_ro = mmc_spi_get_ro,
- .get_cd = mmc_spi_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
};
@@ -1324,6 +1303,7 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_host *mmc;
struct mmc_spi_host *host;
int status;
+ bool has_ro = false;
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
@@ -1448,18 +1428,33 @@ static int mmc_spi_probe(struct spi_device *spi)
}
/* pass platform capabilities, if any */
- if (host->pdata)
+ if (host->pdata) {
mmc->caps |= host->pdata->caps;
+ mmc->caps2 |= host->pdata->caps2;
+ }
status = mmc_add_host(mmc);
if (status != 0)
goto fail_add_host;
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
+ status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
+ host->pdata->cd_debounce);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+ has_ro = true;
+ status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),
host->dma_dev ? "" : ", no DMA",
- (host->pdata && host->pdata->get_ro)
- ? "" : ", no WP",
+ has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
(mmc->caps & MMC_CAP_NEEDS_POLL)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 4ddd83f9865..06c5b0b28eb 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -757,7 +757,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (mvsd_data->gpio_card_detect &&
gpio_is_valid(mvsd_data->gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc,
- mvsd_data->gpio_card_detect);
+ mvsd_data->gpio_card_detect,
+ 0);
if (ret)
goto out;
} else {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index f38d75f46f7..e1fa3ef735e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -102,12 +102,15 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc)
BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
}
-static void mxs_mmc_reset(struct mxs_mmc_host *host)
+static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
u32 ctrl0, ctrl1;
+ int ret;
- stmp_reset_block(ssp->base);
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ return ret;
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -132,6 +135,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(ctrl0, ssp->base + HW_SSP_CTRL0);
writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+ return 0;
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -618,21 +622,25 @@ static int mxs_mmc_probe(struct platform_device *pdev)
}
}
- ssp->clk = clk_get(&pdev->dev, NULL);
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk)) {
ret = PTR_ERR(ssp->clk);
goto out_mmc_free;
}
clk_prepare_enable(ssp->clk);
- mxs_mmc_reset(host);
+ ret = mxs_mmc_reset(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
+ goto out_clk_disable;
+ }
ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
ret = -ENODEV;
- goto out_clk_put;
+ goto out_clk_disable;
}
/* set mmc core parameters */
@@ -685,9 +693,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
out_free_dma:
if (ssp->dmach)
dma_release_channel(ssp->dmach);
-out_clk_put:
+out_clk_disable:
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
out_mmc_free:
mmc_free_host(mmc);
return ret;
@@ -705,7 +712,6 @@ static int mxs_mmc_remove(struct platform_device *pdev)
dma_release_channel(ssp->dmach);
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
mmc_free_host(mmc);
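
devm_clk_get() ties the clock's lifetime to the device, which is what lets the
clk_put() calls above drop out of both the error path and remove(); a sketch
of the managed pattern:

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk))
		return PTR_ERR(ssp->clk);	/* no clk_put() on any path */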
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index d720b5e05b9..6e218fb1a66 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -50,25 +50,6 @@ static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
return container_of(dev->platform_data, struct of_mmc_spi, pdata);
}
-static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num)
-{
- struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- bool active_low = oms->alow_gpios[gpio_num];
- bool value = gpio_get_value(oms->gpios[gpio_num]);
-
- return active_low ^ value;
-}
-
-static int of_mmc_spi_get_cd(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, CD_GPIO);
-}
-
-static int of_mmc_spi_get_ro(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, WP_GPIO);
-}
-
static int of_mmc_spi_init(struct device *dev,
irqreturn_t (*irqhandler)(int, void *), void *mmc)
{
@@ -130,20 +111,22 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (!gpio_is_valid(oms->gpios[i]))
continue;
- ret = gpio_request(oms->gpios[i], dev_name(dev));
- if (ret < 0) {
- oms->gpios[i] = -EINVAL;
- continue;
- }
-
if (gpio_flags & OF_GPIO_ACTIVE_LOW)
oms->alow_gpios[i] = true;
}
- if (gpio_is_valid(oms->gpios[CD_GPIO]))
- oms->pdata.get_cd = of_mmc_spi_get_cd;
- if (gpio_is_valid(oms->gpios[WP_GPIO]))
- oms->pdata.get_ro = of_mmc_spi_get_ro;
+ if (gpio_is_valid(oms->gpios[CD_GPIO])) {
+ oms->pdata.cd_gpio = oms->gpios[CD_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_CD_GPIO;
+ if (!oms->alow_gpios[CD_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+ if (gpio_is_valid(oms->gpios[WP_GPIO])) {
+ oms->pdata.ro_gpio = oms->gpios[WP_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_RO_GPIO;
+ if (!oms->alow_gpios[WP_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
oms->detect_irq = irq_of_parse_and_map(np, 0);
if (oms->detect_irq != 0) {
@@ -166,15 +149,10 @@ void mmc_spi_put_pdata(struct spi_device *spi)
struct device *dev = &spi->dev;
struct device_node *np = dev->of_node;
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- int i;
if (!dev->platform_data || !np)
return;
- for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
- if (gpio_is_valid(oms->gpios[i]))
- gpio_free(oms->gpios[i]);
- }
kfree(oms);
dev->platform_data = NULL;
}
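
For boards still using platform data, the new fields replace the old get_cd()
and get_ro() callbacks; a hypothetical board-file fragment (GPIO numbers and
debounce value are illustrative):

	static struct mmc_spi_platform_data mmc_spi_pdata = {
		.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO,
		.cd_gpio	= 101,
		.ro_gpio	= 102,
		.cd_debounce	= 200,	/* microseconds, 0 to disable */
		.caps2		= MMC_CAP2_RO_ACTIVE_HIGH,
	};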
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1865321465c..6ac63df645c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
+#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1041,6 +1042,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
}
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
if (end_cmd || ((status & CC_EN) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC_EN)) && host->mrq)
@@ -1060,7 +1062,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
omap_hsmmc_do_irq(host, status);
/* Flush posted write */
- OMAP_HSMMC_WRITE(host->base, STAT, status);
status = OMAP_HSMMC_READ(host->base, STAT);
}
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 0584a1c788b..36fa2df0466 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -119,7 +119,7 @@ static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
return byte;
}
-unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
{
return MIN_FREQ;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1dd5ba85875..abc8cf01e6e 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -616,7 +616,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 15039e2d1c1..e328252ebf2 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -316,6 +316,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
/* call to generic mmc_of_parse to support additional capabilities */
mmc_of_parse(host->mmc);
+ mmc_of_parse_voltage(np, &host->ocr_mask);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index bf99359a3a9..793dacd3b84 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -278,7 +278,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
if (gpio_is_valid(pdata->ext_cd_gpio)) {
- ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
+ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
+ 0);
if (ret) {
dev_err(mmc_dev(host->mmc),
"failed to allocate card detect gpio\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 926aaf6acc6..6debda95215 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -296,9 +296,12 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
unsigned long timeout;
u16 clk = 0;
- /* don't bother if the clock is going off */
- if (clock == 0)
+ /* If the clock is being turned off, write 0 to the clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ host->clock = clock;
return;
+ }
sdhci_s3c_set_clock(host, clock);
@@ -608,6 +611,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
host->hw_name = "samsung-hsmmc";
host->ops = &sdhci_s3c_ops;
host->quirks = 0;
+ host->quirks2 = 0;
host->irq = irq;
/* Setup quirks for the controller */
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 62a4a835acc..696122c1b46 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -84,7 +84,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* gets setup in sdhci_add_host() and we oops.
*/
if (gpio_is_valid(priv->gpio_cd)) {
- ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd);
+ ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
if (ret) {
dev_err(&pdev->dev, "card detect irq request failed: %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index dd2c083c434..7a7fb4f0d5a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3119,6 +3119,9 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_MULTIPLIER;
}
+ if (host->ocr_mask)
+ ocr_avail = host->ocr_mask;
+
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
@@ -3213,6 +3216,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->tuning_timer.function = sdhci_tuning_timer;
}
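+ /*
+ * Initialize the controller before requesting the interrupt: the line
+ * is IRQF_SHARED, so sdhci_irq() may run as soon as request_irq()
+ * succeeds.
+ */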
+ sdhci_init(host, 0);
+
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
if (ret) {
@@ -3221,8 +3226,6 @@ int sdhci_add_host(struct sdhci_host *host)
goto untasklet;
}
- sdhci_init(host, 0);
-
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs(host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6706b5e3b97..36629a024aa 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -61,6 +61,7 @@
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>
@@ -133,6 +134,8 @@
INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@@ -161,7 +164,7 @@
#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
- MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
@@ -243,6 +246,8 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
+ bool ccs_enable; /* Command Completion Signal support */
+ bool clk_ctrl2_enable;
struct mutex thread_lock;
/* DMA support */
@@ -386,25 +391,29 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
host->dma_active = false;
- if (!pdata)
- return;
-
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
return;
+ }
/* We can only either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_tx);
+ host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_tx : NULL,
+ &host->pd->dev, "tx");
dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
- cfg.slave_id = pdata->slave_id_tx;
+ /* In the OF case the driver will get the slave ID from the DT */
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start + MMCIF_CE_DATA;
cfg.src_addr = 0;
@@ -412,15 +421,17 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgtx;
- host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_rx);
+ host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_rx : NULL,
+ &host->pd->dev, "rx");
dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx)
goto erqrx;
- cfg.slave_id = pdata->slave_id_rx;
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = res->start + MMCIF_CE_DATA;
@@ -485,8 +496,12 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ if (host->ccs_enable)
+ tmp |= SCCSTO_29;
+ if (host->clk_ctrl2_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
- SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
/* byte swap on */
sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
@@ -866,6 +881,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
break;
}
+ if (host->ccs_enable)
+ mask |= MASK_MCCSTO;
+
if (mrq->data) {
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
@@ -873,7 +891,10 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
opc = sh_mmcif_set_cmd(host, mrq);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
/* set arg */
sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
@@ -956,11 +977,8 @@ static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
- struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
struct mmc_host *mmc = host->mmc;
- if (pd && pd->set_pwr)
- pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
if (!IS_ERR(mmc->supply.vmmc))
/* Errors ignored... */
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
@@ -1241,11 +1259,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
- u32 state;
+ u32 state, mask;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(state & sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK)));
+ mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
if (state & ~MASK_CLEAN)
@@ -1379,6 +1400,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host->mmc = mmc;
host->addr = reg;
host->timeout = msecs_to_jiffies(1000);
+ host->ccs_enable = !pd || !pd->ccs_unsupported;
+ host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
host->pd = pdev;
@@ -1436,7 +1459,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
}
if (pd && pd->use_cd_gpio) {
- ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
if (ret < 0)
goto erqcd;
}
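
A behavioural sketch of dma_request_slave_channel_compat() as used above (not
the exact dmaengine implementation): try the DT-described slave channel first,
then fall back to the filter-based lookup driven by platform data:

	chan = dma_request_slave_channel(dev, name);	/* OF/DT lookup */
	if (!chan)
		chan = dma_request_channel(mask, filter_fn, filter_param);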
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ebea749297c..87ed3fb5149 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -70,20 +70,6 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
clk_disable(priv->clk);
}
-static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- p->set_pwr(pdev, state);
-}
-
-static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- return p->get_cd(pdev);
-}
-
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
@@ -129,7 +115,12 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,shmobile-sdhi" },
{ .compatible = "renesas,sh7372-sdhi" },
+ { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{ .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -180,10 +171,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->capabilities |= p->tmio_caps;
mmc_data->capabilities2 |= p->tmio_caps2;
mmc_data->cd_gpio = p->cd_gpio;
- if (p->set_pwr)
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- if (p->get_cd)
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
/*
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 47bdb8fa341..65edb4a6245 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
@@ -116,7 +117,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
@@ -197,7 +198,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b72edb72f7d..b3802256f95 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -795,9 +795,13 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* omap_hsmmc.c driver does.
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
- regulator_enable(mmc->supply.vqmmc);
+ ret = regulator_enable(mmc->supply.vqmmc);
udelay(200);
}
+
+ if (ret < 0)
+ dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+ ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
@@ -932,25 +936,11 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_cd(mmc);
- if (ret >= 0)
- return ret;
-
- if (!pdata->get_cd)
- return -ENOSYS;
- else
- return pdata->get_cd(host->pdev);
-}
-
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
- .get_cd = tmio_mmc_get_cd,
+ .get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
@@ -1106,7 +1096,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cb9f361c03a..e9028ad05ff 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2079,7 +2079,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 39e5b1c7ffe..72df399c4ab 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2404,8 +2404,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ce4677668e2..eeab40b01b7 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -349,6 +349,8 @@ static ssize_t bonding_store_mode(struct device *d,
goto out;
}
+ /* don't cache arp_validate between modes */
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
pr_info("%s: setting mode to %s (%d).\n",
@@ -419,27 +421,39 @@ static ssize_t bonding_store_arp_validate(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+ if (!rtnl_trylock())
+ return restart_syscall();
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
pr_err("%s: arp_validate only supported in active-backup mode.\n",
bond->dev->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
pr_info("%s: setting arp_validate to %s (%d).\n",
bond->dev->name, arp_validate_tbl[new_value].modename,
new_value);
+ if (bond->dev->flags & IFF_UP) {
+ if (!new_value)
+ bond->recv_probe = NULL;
+ else if (bond->params.arp_interval)
+ bond->recv_probe = bond_arp_rcv;
+ }
bond->params.arp_validate = new_value;
+out:
+ rtnl_unlock();
- return count;
+ return ret;
}
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
@@ -555,8 +569,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
if (!rtnl_trylock())
return restart_syscall();
@@ -599,8 +613,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* is called.
*/
if (!new_value) {
+ if (bond->params.arp_validate)
+ bond->recv_probe = NULL;
cancel_delayed_work_sync(&bond->arp_work);
} else {
+ /* arp_validate can be set only in active-backup mode */
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
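
Taken together, the two stores keep one invariant while the bond is up; a
sketch of the rule they implement under RTNL:

	/* recv_probe points at bond_arp_rcv only while both knobs are set */
	if (bond->params.arp_interval && bond->params.arp_validate)
		bond->recv_probe = bond_arp_rcv;
	else
		bond->recv_probe = NULL;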
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f7ab16185f6..7ad8bd5cc94 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -430,6 +430,7 @@ static inline bool slave_can_tx(struct slave *slave)
struct bond_net;
+int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8ac48fbf8a6..b9a5fb6400d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -926,13 +926,13 @@ static int bcm_enet_open(struct net_device *dev)
if (ret)
goto out_phy_disconnect;
- ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+ ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
dev->name, dev);
if (ret)
goto out_freeirq;
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
@@ -2156,13 +2156,13 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq;
if (priv->irq_tx != -1) {
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2361bf236ce..90045c920d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* If we fail to allocate a substitute page, we simply stop
where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
if (unlikely(err)) {
bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
@@ -616,12 +616,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
kfree(data);
}
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
- if (fp->rx_frag_size)
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfp_mask & __GFP_WAIT))
+ return (void *)__get_free_page(gfp_mask);
+
return netdev_alloc_frag(fp->rx_frag_size);
+ }
- return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
#ifdef CONFIG_INET
@@ -701,7 +706,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = bnx2x_frag_alloc(fp);
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
@@ -752,15 +757,15 @@ drop:
bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = bnx2x_frag_alloc(fp);
+ data = bnx2x_frag_alloc(fp, gfp_mask);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -953,7 +958,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
memcpy(skb->data, data + pad, len);
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
} else {
- if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
@@ -1313,7 +1319,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = bnx2x_frag_alloc(fp);
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1335,7 +1342,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
BNX2X_ERR("was only able to allocate %d rx sges\n",
i);
BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -4221,7 +4229,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
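
The pattern above threads a gfp_t from each call site down to the allocator so
ring setup can sleep while the RX fast path stays atomic; a minimal sketch
(helper name hypothetical):

	static void *frag_alloc(unsigned int size, gfp_t gfp_mask)
	{
		if (gfp_mask & __GFP_WAIT)	/* process context: may sleep */
			return (void *)__get_free_page(gfp_mask);
		return netdev_alloc_frag(size);	/* fast path: atomic */
	}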
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 634a793c1c4..2f8dbbbd7a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -7645,6 +7645,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
bnx2x_iov_init_dq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index b26eb83069b..2604b6204ab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1756,9 +1756,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the number of VF allowed doorbells to the full DQ range */
- REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
-
/* set the VF doorbell threshold */
REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f0e7ed20a75..149ac85b5f9 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,4 +241,22 @@ config IXGBEVF
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
+config I40E
+ tristate "Intel(R) Ethernet Controller XL710 Family support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) Ethernet Controller XL710 Family of
+ devices. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40e.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index c8210e68866..5bae933efc7 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 00000000000..479b2c4e552
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,44 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-objs := i40e_main.o \
+ i40e_ethtool.o \
+ i40e_adminq.o \
+ i40e_common.o \
+ i40e_hmc.o \
+ i40e_lan_hmc.o \
+ i40e_nvm.o \
+ i40e_debugfs.o \
+ i40e_diag.o \
+ i40e_txrx.o \
+ i40e_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 00000000000..b5252eb8a6c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,558 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/version.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+
+/* Useful i40e defaults */
+#define I40E_BASE_PF_SEID 16
+#define I40E_BASE_VSI_SEID 512
+#define I40E_BASE_VEB_SEID 288
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 32
+#define I40E_AQ_WORK_LIMIT 16
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_MSG_ENABLE 4
+
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_MID_SHIFT 4
+#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
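+/* Example decode (illustrative): hw->nvm.version == 0x1234 gives
+ * hi = (0x1234 & I40E_NVM_VERSION_HI_MASK) >> 12 = 1,
+ * mid = (0x1234 & I40E_NVM_VERSION_MID_MASK) >> 4 = 0x23 (35 decimal),
+ * lo = 0x1234 & I40E_NVM_VERSION_LO_MASK = 4,
+ * which i40e_fw_version_str() below renders as "n01.35.04".
+ */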
+
+/* magic for getting defines into strings */
+#define STRINGIFY(foo) #foo
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#define I40E_RX_DESC(R, i) \
+ ((ring_is_16byte_desc_enabled(R)) \
+ ? (union i40e_32byte_rx_desc *) \
+ (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
+ : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+#define I40E_TX_DESC(R, i) \
+ (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i) \
+ (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
+/* default to trying for four seconds */
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* driver state flags */
+enum i40e_state_t {
+ __I40E_TESTING,
+ __I40E_CONFIG_BUSY,
+ __I40E_CONFIG_DONE,
+ __I40E_DOWN,
+ __I40E_NEEDS_RESTART,
+ __I40E_SERVICE_SCHED,
+ __I40E_ADMINQ_EVENT_PENDING,
+ __I40E_MDD_EVENT_PENDING,
+ __I40E_VFLR_EVENT_PENDING,
+ __I40E_RESET_RECOVERY_PENDING,
+ __I40E_RESET_INTR_RECEIVED,
+ __I40E_REINIT_REQUESTED,
+ __I40E_PF_RESET_REQUESTED,
+ __I40E_CORE_RESET_REQUESTED,
+ __I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_FILTER_OVERFLOW_PROMISC,
+};
+
+enum i40e_interrupt_policy {
+ I40E_INTERRUPT_BEST_CASE,
+ I40E_INTERRUPT_MEDIUM,
+ I40E_INTERRUPT_LOWEST
+};
+
+struct i40e_lump_tracking {
+ u16 num_entries;
+ u16 search_hint;
+ u16 list[0];
+#define I40E_PILE_VALID_BIT 0x8000
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
+#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
+struct i40e_fdir_data {
+ u16 q_index;
+ u8 flex_off;
+ u8 pctype;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fd_status;
+ u16 cnt_index;
+ u32 fd_id;
+ u8 *raw_packet;
+};
+
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
+#define I40E_MAX_USER_PRIORITY 8
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+ u16 qoffset; /* Queue offset from base queue */
+ u16 qcount; /* Total Queues */
+ u8 netdev_tc; /* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+ u8 numtc; /* Total number of enabled TCs */
+ u8 enabled_tc; /* TC map */
+ struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+ struct pci_dev *pdev;
+ struct i40e_hw hw;
+ unsigned long state;
+ unsigned long link_check_timeout;
+ struct msix_entry *msix_entries;
+ u16 num_msix_entries;
+ bool fc_autoneg_status;
+
+ u16 eeprom_version;
+ u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
+ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
+ u16 num_req_vfs; /* num VFs requested for this PF */
+ u16 num_vf_qps; /* num queue pairs per vf */
+ u16 num_tc_qps; /* num queue pairs per TC */
+ u16 num_lan_qps; /* num lan queues this pf has set up */
+ u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ u16 rss_size; /* num queues in the RSS array */
+ u16 rss_size_max; /* HW defined max RSS queues */
+ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u8 atr_sample_rate;
+
+ enum i40e_interrupt_policy int_policy;
+ u16 rx_itr_default;
+ u16 tx_itr_default;
+ u16 msg_enable;
+ char misc_int_name[IFNAMSIZ + 9];
+ u16 adminq_work_limit; /* num of admin receive queue desc to process */
+ int service_timer_period;
+ struct timer_list service_timer;
+ struct work_struct service_task;
+
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
+#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
+#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
+#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
+#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
+#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
+#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ bool stat_offsets_loaded;
+ struct i40e_hw_port_stats stats;
+ struct i40e_hw_port_stats stats_offsets;
+ u32 tx_timeout_count;
+ u32 tx_timeout_recovery_level;
+ unsigned long tx_timeout_last_recovery;
+ u32 hw_csum_rx_error;
+ u32 led_status;
+ u16 corer_count; /* Core reset count */
+ u16 globr_count; /* Global reset count */
+ u16 empr_count; /* EMP reset count */
+ u16 pfr_count; /* PF reset count */
+
+ struct mutex switch_mutex;
+ u16 lan_vsi; /* our default LAN VSI */
+ u16 lan_veb; /* initial relay, if exists */
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
+ u16 next_vsi; /* Next unallocated VSI - 0-based! */
+ struct i40e_vsi **vsi;
+ struct i40e_veb *veb[I40E_MAX_VEB];
+
+ struct i40e_lump_tracking *qp_pile;
+ struct i40e_lump_tracking *irq_pile;
+
+ /* switch config info */
+ u16 pf_seid;
+ u16 main_vsi_seid;
+ u16 mac_seid;
+ struct i40e_aqc_get_switch_config_data *sw_config;
+ struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+
+ /* sr-iov config info */
+ struct i40e_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u32 vf_aq_requests;
+
+ /* DCBx/DCBNL capability for the PF, indicating whether DCBx is
+ * managed by firmware or by a host-based agent (LLDPAD), and which
+ * flavor of the DCBx protocol (IEEE/CEE) the device supports.
+ * For now only IEEE mode is supported.
+ */
+ u16 dcbx_cap;
+
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
+ struct i40e_filter_control_settings filter_settings;
+};
+
+struct i40e_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+ s16 vlan;
+ u8 counter; /* number of instances of this filter */
+ bool is_vf; /* filter belongs to a VF */
+ bool is_netdev; /* filter belongs to a netdev */
+ bool changed; /* filter needs to be sync'd to the HW */
+};
+
+struct i40e_veb {
+ struct i40e_pf *pf;
+ u16 idx;
+ u16 veb_idx; /* index of VEB parent */
+ u16 seid;
+ u16 uplink_seid;
+ u16 stats_idx; /* stats index for this VEB */
+ u8 enabled_tc;
+ u16 flags;
+ u16 bw_limit;
+ u8 bw_max_quanta;
+ bool is_abs_credits;
+ u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+ struct kobject *kobj;
+ bool stat_offsets_loaded;
+ struct i40e_eth_stats stats;
+ struct i40e_eth_stats stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ bool netdev_registered;
+ bool stat_offsets_loaded;
+
+ u32 current_netdev_flags;
+ unsigned long state;
+#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
+#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+ unsigned long flags;
+
+ struct list_head mac_filter_list;
+
+ /* VSI stats */
+ struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+ u32 tx_restart;
+ u32 tx_busy;
+ u32 rx_buf_failed;
+ u32 rx_page_failed;
+
+ /* These are arrays of rings, allocated at run-time */
+ struct i40e_ring *rx_rings;
+ struct i40e_ring *tx_rings;
+
+ u16 work_limit;
+ /* High bit set means dynamic; use the accessor routines to read/write.
+ * Hardware only supports 2us resolution for the ITR registers.
+ * These values always store the user setting and must be converted
+ * before being programmed into a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ u16 max_frame;
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+
+ /* List of q_vectors allocated to this VSI */
+ struct i40e_q_vector *q_vectors;
+ int num_q_vectors;
+ int base_vector;
+
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
+ u16 uplink_seid;
+
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 num_desc;
+ enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
+ u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+
+ struct i40e_tc_configuration tc_config;
+ struct i40e_aqc_vsi_properties_data info;
+
+ /* VSI BW limit (absolute across all TCs) */
+ u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
+
+ /* Relative TC credits across VSIs */
+ u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit credits within VSI */
+ u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit max quanta within VSI */
+ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+
+ /* VSI specific handlers */
+ irqreturn_t (*irq_handler)(int irq, void *data);
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+ struct i40e_vsi *vsi;
+};
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+ struct i40e_vsi *vsi;
+
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
+
+ struct napi_struct napi;
+
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+
+ char name[IFNAMSIZ + 9];
+ cpumask_t affinity_mask;
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40e_pf *pf;
+};
+
+/**
+ * i40e_fw_version_str - format the FW and NVM version strings
+ * @hw: ptr to the hardware info
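+ *
+ * Note: formats into a static buffer, so the returned string is overwritten
+ * by the next call and is not safe for concurrent callers.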
+ **/
+static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+{
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
+ >> I40E_NVM_VERSION_MID_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
+ >> I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack);
+
+ return buf;
+}
+
+/**
+ * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->back;
+}
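+
+/* Typical use from a netdev callback (sketch; the surrounding callback
+ * is assumed for illustration, not code from this patch):
+ *
+ *	struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ */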
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+ irqreturn_t (*irq_handler)(int, void *))
+{
+ vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: the first quad word of the program status descriptor
+ *
+ * The value in the descriptor length field indicates whether this
+ * is a programming status descriptor for flow director or FCoE:
+ * if the field equals I40E_RX_PROG_STATUS_DESC_LENGTH it is a
+ * programming status descriptor, otherwise it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+ return I40E_RX_PROG_STATUS_DESC_LENGTH ==
+ (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+}
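+
+/* Rx-path sketch (the qword source shown is an assumption about the
+ * Rx descriptor layout, for illustration only):
+ *
+ *	u64 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ *
+ *	if (i40e_rx_is_programming_status(qw))
+ *		handle the flow director/FCoE programming status;
+ *	else
+ *		process as a normal packet descriptor;
+ */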
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern const char i40e_driver_name[];
+extern const char i40e_driver_version_str[];
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+ bool printconfig);
+
+/* needed by i40e_main.c */
+void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add);
+
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
+ struct i40e_vsi *start_vsi);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS*/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+
+#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 00000000000..0c524fa9f81
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,983 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.asq.desc = hw->aq.asq_mem.va;
+ hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
+
+ ret_code = i40e_allocate_virt_mem(hw, &mem,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ return ret_code;
+ }
+
+ hw->aq.asq.details = mem.va;
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.arq.desc = hw->aq.arq_mem.va;
+ hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ mem.va = hw->aq.asq.details;
+ i40e_free_virt_mem(hw, &mem);
+ hw->aq.asq.details = NULL;
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq_mem);
+ hw->aq.arq_mem.va = NULL;
+ hw->aq.arq_mem.pa = 0;
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with the Admin Queue design; there
+		 * is no register for buffer size configuration.
+		 */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* now free the buffer info list */
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+	/* Update tail in the HW to post the pre-allocated buffers; one
+	 * slot is held back so that head == tail always means an empty
+	 * (rather than full) ring.
+	 */
+	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ *  - hw->aq.num_asq_entries
+ *  - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ *  - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ATQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ATQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_asq(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ARQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ARQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_arq(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40e_init_adminq(struct i40e_hw *hw)
+{
+ u16 eetrack_lo, eetrack_hi;
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code)
+ goto init_adminq_free_arq;
+
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+	/* A failure here is not fatal, so the return code from setting
+	 * the HMC resource profile is deliberately discarded.
+	 */
+	ret_code = i40e_aq_set_hmc_resource_profile(hw,
+						    I40E_HMC_PROFILE_DEFAULT,
+						    0,
+						    NULL);
+	ret_code = 0;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+init_adminq_exit:
+ return ret_code;
+}
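+
+/* Probe-time usage sketch (the queue depths and buffer sizes below are
+ * illustrative assumptions, not values taken from this patch):
+ *
+ *	hw->aq.num_asq_entries = 128;
+ *	hw->aq.num_arq_entries = 128;
+ *	hw->aq.asq_buf_size = 512;
+ *	hw->aq.arq_buf_size = 512;
+ *	err = i40e_init_adminq(hw);
+ */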
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+	/* destroy the locks */
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors in the send queue.
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ memcpy(details, cmd_details,
+ sizeof(struct i40e_asq_cmd_details));
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+			/* ugh! delay while holding the mutex */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+}
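+
+/* Typical caller pattern for a direct (non-buffered) command, using an
+ * opcode defined in i40e_adminq_cmd.h (sketch):
+ *
+ *	struct i40e_aq_desc desc;
+ *	i40e_status status;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc,
+ *					  i40e_aqc_opc_queue_shutdown);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */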
+
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40e_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+	/* Restore the original datalen and buffer address in the desc;
+	 * FW updates datalen to indicate the event message size.
+	 */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
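+
+/* Worked example of the *pending computation above: with count = 128,
+ * a cleaned index ntc = 120 and a hardware head ntu = 5,
+ * pending = 128 + (5 - 120) = 13 events still queued.
+ */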
+
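+/**
+ * i40e_resume_aq - resume AQ processing after a reset
+ * @hw: pointer to the hardware structure
+ *
+ * Re-enables the send and receive queues after their registers have
+ * been reset (e.g. by a PF reset).
+ **/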
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ u32 reg = 0;
+
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+ reg = hw->aq.num_asq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
+ wr32(hw, I40E_VF_ATQLEN1, reg);
+ } else {
+ reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
+ wr32(hw, I40E_PF_ATQLEN, reg);
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+ reg = hw->aq.num_arq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+		reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ wr32(hw, I40E_VF_ARQLEN1, reg);
+ } else {
+		reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
+ wr32(hw, I40E_PF_ARQLEN, reg);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 00000000000..22e5ed683e4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc))[i]))
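+
+/* e.g. I40E_ADMINQ_DESC(hw->aq.arq, 3) yields a pointer to the fourth
+ * i40e_aq_desc in the receive ring's descriptor memory.
+ */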
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ void *desc; /* Descriptor ring memory */
+ void *details; /* ASQ details */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u64 dma_addr; /* Physical address of the ring */
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
+ struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
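+/* With the 10 usec polling step used in i40e_asq_send_command(), this
+ * timeout allows up to 100000 / 10 = 10000 polls (~100 ms).
+ */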
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 00000000000..e61ebdd5a5f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
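+
+/* Flags are ORed into the little-endian descriptor flags field; e.g.
+ * an indirect command whose buffer carries data to the firmware sets
+ * both BUF and RD (sketch, following the usage in i40e_adminq.c):
+ *
+ *	desc->flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ */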
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+	/* phy commands */
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data that is both sent and returned uses _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It produces a divide-by-zero
+ * error if the structure is not of the correct size; otherwise it
+ * creates an enum that is never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
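+
+/* For example, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) expands to
+ * an enum whose initializer divides by zero at compile time unless
+ * sizeof(struct i40e_aqc_get_version) == 16.
+ */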
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (direct 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211) and Get VSI (indirect 0x0212)
+ * use the generic i40e_aqc_switch_seid descriptor format
+ * use the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
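+/* e.g. (illustrative) user priority 2 is extracted from the table as
+ *	up2 = (le32_to_cpu(ingress_table) &
+ *	       I40E_AQ_VSI_UP_TABLE_UP2_MASK) >>
+ *	      I40E_AQ_VSI_UP_TABLE_UP2_SHIFT;
+ */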
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
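+/* e.g. (illustrative) a TC of 8 queues starting at queue 16 would encode as
+ *	tc_mapping[tc] = cpu_to_le16((16 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ *				     (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ * assuming the NUMBER field holds log2 of the queue count
+ */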
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
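+
+/* For a concrete user, see i40e_aq_set_vsi_unicast_promiscuous() in
+ * i40e_common.c later in this patch: it sets I40E_AQC_SET_VSI_PROMISC_UNICAST
+ * in promiscuous_flags and marks the same bit valid in valid_flags.
+ */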
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Delete multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+ __le32 key_low;
+ __le32 key_high;
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: rule types 4 and 5 do not use an external buffer;
+ * take care to set the flags accordingly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
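+
+/* Illustrative sketch: setting the MDIPW flag together with an interval of
+ * 0x10 would compose control_flags as, e.g.,
+ *
+ *	flags = cpu_to_le32(I40E_AQC_STORM_CONTROL_MDIPW |
+ *			    (0x10 << I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT));
+ *
+ * the interval units are not spelled out here, so 0x10 is only a placeholder.
+ */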
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query VSI BW configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get (direct 0x0500) and Set (direct 0x0501) the active
+ * HMC resource profile and status.
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
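+
+/* The link_speed fields below are bitmaps of these values, so a caller
+ * checks capability with a mask test, e.g. (illustrative)
+ *
+ *	if (resp->link_speed & I40E_LINK_SPEED_10GB)
+ *		...
+ */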
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bit definitions as above in all fields */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
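+
+/* Illustrative sketch: to be notified only of link state changes,
+ *
+ *	cmd->event_mask = cpu_to_le16(I40E_AQ_EVENT_LINK_UPDOWN);
+ *
+ * where cmd is assumed to point at a struct i40e_aqc_set_phy_int_mask.
+ */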
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXTERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXTERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
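+
+/* Illustrative sketch: the last buffer of a multi-buffer update would be
+ * flagged so firmware knows the transfer is complete, e.g.
+ *
+ *	cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ *
+ * the multi-buffer behavior itself is an assumption based on the flag name.
+ */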
+
+/* Send to PF command (indirect 0x0801); id is only used by the PF
+ * Send to VF command (indirect 0x0802); id is only used by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
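+
+/* Illustrative sketch: requesting the remote MIB from the nearest bridge,
+ *
+ *	cmd->type = I40E_AQ_LLDP_MIB_REMOTE |
+ *		    (I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE <<
+ *		     I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT);
+ */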
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_index;
+#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
+#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+/* Remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_entries;
+ u8 tunnels_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+struct i40e_aqc_tunnel_key_structure {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 00000000000..3b1cc214f9d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
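+
+/* Illustrative sketch: an ARQ descriptor ring might be allocated as
+ *
+ *	struct i40e_dma_mem mem;
+ *	i40e_status ret = i40e_allocate_dma_mem(hw, &mem, i40e_mem_arq_ring,
+ *						ring_bytes, 4096);
+ *
+ * the 4096 alignment and ring_bytes are placeholders for this example.
+ */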
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 00000000000..c21df7bc3b1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,2041 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_SFP_XL710_DEVICE_ID:
+ case I40E_SFP_X710_DEVICE_ID:
+ case I40E_QEMU_DEVICE_ID:
+ case I40E_KX_A_DEVICE_ID:
+ case I40E_KX_B_DEVICE_ID:
+ case I40E_KX_C_DEVICE_ID:
+ case I40E_KX_D_DEVICE_ID:
+ case I40E_QSFP_A_DEVICE_ID:
+ case I40E_QSFP_B_DEVICE_ID:
+ case I40E_QSFP_C_DEVICE_ID:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_VF_DEVICE_ID:
+ case I40E_VF_HV_DEVICE_ID:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask related to admin queue
+ * @desc: pointer to admin queue command descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ u32 reg;
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = le16_to_cpu(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = cpu_to_le16(flags);
+ memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
+ memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ i40e_status status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+{
+ i40e_status status = 0;
+
+ /* Make sure it is not a multicast address */
+ if (I40E_IS_MULTICAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is multicast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (I40E_IS_BROADCAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is broadcast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ hw_dbg(hw, "MAC address is all zeros\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+i40e_status i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 wait_cnt = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ msleep(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Determine the PF number based on the PCI fn */
+ hw->pf_id = (u8)hw->bus.func;
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!wait_cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ hw_dbg(hw, "PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+ return 0;
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ /* Clear single descriptor fetch/write-back mode */
+ reg = rd32(hw, I40E_GLLAN_RCTL_0);
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+}
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 gpio_val = 0;
+ u32 mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, else on (see EAS for mode details)
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode)
+{
+ u32 gpio_val = 0;
+ u32 led_mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val |= led_mode;
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ }
+}
+
+/* Admin command wrappers */
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ i40e_status status;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = cpu_to_le16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(struct i40e_link_status));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+
+ if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ /* save link status information */
+ if (link)
+ memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
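
Caller sketch (annotation): the context struct carries the request fields in and, on success, the completion fields (seid, vsi_number, allocation counts) back out. The connection-type value and the hw/dev/uplink_seid/vf_id variables below are illustrative assumptions.

        struct i40e_vsi_context ctx = {};
        i40e_status ret;

        ctx.uplink_seid = uplink_seid;          /* parent switch element */
        ctx.connection_type = 0x1;              /* assumed "normal" connection type */
        ctx.vf_num = vf_id;

        ret = i40e_aq_add_vsi(hw, &ctx, NULL);
        if (!ret)
                dev_info(dev, "added VSI seid %d (vsi %d), %d VSIs still free\n",
                         ctx.seid, ctx.vsi_number, ctx.vsis_unallocated);
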
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
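
Together, the three wrappers above let a caller toggle full promiscuous receive on one VSI. A minimal sketch (hypothetical helper, not in this patch):

        static i40e_status i40e_vsi_set_promisc(struct i40e_hw *hw, u16 seid, bool on)
        {
                i40e_status ret;

                ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, on, NULL);
                if (ret)
                        return ret;
                ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, on, NULL);
                if (ret)
                        return ret;
                /* set_filter=true also receives broadcast frames */
                return i40e_aq_set_vsi_broadcast(hw, seid, on, NULL);
        }
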
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of the result buffer, in bytes
+ * @start_seid: seid to start for the report, 0 == beginning; on return,
+ * the next seid to ask for, 0 when the report is complete
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = cpu_to_le16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = le16_to_cpu(scfg->seid);
+
+ return status;
+}
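
Because start_seid is both input and output, a caller can page through an arbitrarily large switch report by looping until the firmware hands back 0. A sketch under that assumption (sw_config and buf_size are hypothetical caller state):

        u16 next_seid = 0;      /* 0 == start from the beginning */
        i40e_status ret;

        /* assume sw_config points at a buf_size byte response buffer */
        do {
                ret = i40e_aq_get_switch_config(hw, sw_config, buf_size,
                                                &next_seid, NULL);
                if (ret)
                        break;
                /* ... consume the returned switch element records here ... */
        } while (next_seid != 0);
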
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: AdminQ API major version
+ * @api_minor_version: AdminQ API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (fw_major_version != NULL)
+ *fw_major_version = le16_to_cpu(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = le16_to_cpu(resp->fw_minor);
+ if (api_major_version != NULL)
+ *api_major_version = le16_to_cpu(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = le16_to_cpu(resp->api_minor);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor, build, and subbuild versions
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ i40e_status status;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if link is up, false if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ bool link_status = false;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status)
+ goto i40e_get_link_status_exit;
+ }
+
+ link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+ return link_status;
+}
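
Watchdog-style usage sketch (annotation; netdev is a hypothetical caller variable): setting get_link_info first forces a fresh AQ query, otherwise the cached state from the last query is returned.

        hw->phy.get_link_info = true;           /* force a firmware refresh */
        if (i40e_get_link_status(hw))
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
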
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ i40e_status status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = cpu_to_le16(uplink_seid);
+ cmd->downlink_seid = cpu_to_le16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+ cmd->veb_flags = cpu_to_le16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = le16_to_cpu(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = cpu_to_le16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = le16_to_cpu(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
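
Sketch of adding a single unicast address with no VLAN match (annotation; the two ADD flag constants are assumed from the adminq command definitions elsewhere in this patch set, and mac/vsi_seid are caller state):

        struct i40e_aqc_add_macvlan_element_data elem = {};
        i40e_status ret;

        memcpy(elem.mac_addr, mac, ETH_ALEN);   /* mac: the address to filter on */
        elem.vlan_tag = 0;
        elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
                                 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);

        ret = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
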
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcode for the VF-PF message
+ * @v_retval: return value to pass along with the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = cpu_to_le32(vfid);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = cpu_to_le16(resource);
+ cmd->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ i40e_status status;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
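
NVM reads are normally bracketed by the request/release handshake above, so only one owner touches the flash at a time. A sketch under that assumption (the resource-id and access-type enum values are assumed from the type definitions, which are not in this hunk):

        u64 timeout = 0;
        i40e_status ret;

        ret = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
                                       I40E_RESOURCE_READ, 0, &timeout, NULL);
        if (ret)
                return ret;     /* on EBUSY, timeout says how long the owner may hold it */

        ret = i40e_aq_read_nvm(hw, 0, offset, len, data, true, NULL);

        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
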
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u32 reg_val;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = le16_to_cpu(cap->id);
+ number = le32_to_cpu(cap->number);
+ logical_id = le32_to_cpu(cap->logical_id);
+ phys_id = le32_to_cpu(cap->phys_id);
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = true;
+ reg_val = rd32(hw, I40E_PFQF_CTL_0);
+ if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
+ p->rss_table_size = number;
+ else
+ p->rss_table_size = 128;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+ if (number == 1)
+ p->mfp_mode_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ i40e_status status = 0;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = le16_to_cpu(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
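
Since a too-small buffer makes the firmware report the size it needs through data_size (AQ error ENOMEM), callers typically probe and retry. Two-pass sketch (annotation; the initial buffer size is an arbitrary first guess):

        u16 data_size = 0;
        u16 buf_len = 1024;     /* initial guess */
        void *buf;
        i40e_status ret;

        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return I40E_ERR_NO_MEMORY;

        ret = i40e_aq_discover_capabilities(hw, buf, buf_len, &data_size,
                                            i40e_aqc_opc_list_func_capabilities,
                                            NULL);
        if (ret && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
                /* retry here with a data_size byte buffer */
        }
        kfree(buf);
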
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = le16_to_cpu(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = le16_to_cpu(resp->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_status status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
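
Caller sketch splitting bandwidth evenly across two traffic classes (annotation; the field names are assumed from the AQ data-structure definitions in this patch set, and the credit values are illustrative):

        struct i40e_aqc_configure_vsi_tc_bw_data bw = {};
        i40e_status ret;

        bw.tc_valid_bits = 0x3;         /* enable TC0 and TC1 */
        bw.tc_bw_credits[0] = 50;       /* relative credits, not absolute rates */
        bw.tc_bw_credits[1] = 50;

        ret = i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw, NULL);
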
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns 0 if the values passed are valid and within
+ * range else returns an error.
+ **/
+static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax, pe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
+ val = rd32(hw, I40E_GLHMC_PEXFMAX);
+ pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
+ >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
+ if (pe_filt_size + pe_cntx_size > pe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ i40e_status ret = 0;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return 0;
+}
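
Init-time sketch (annotation): each size enum is a shift count applied to a base, e.g. I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_1K, as validated above. I40E_HASH_LUT_SIZE_128 is assumed to be the companion of the 512 value tested in the code; all values below are illustrative.

        struct i40e_filter_control_settings settings = {};

        settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
        settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
        settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
        settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
        settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
        settings.enable_fdir = true;
        settings.enable_ethtype = true;
        settings.enable_macvlan = true;

        if (i40e_set_filter_control(&pf->hw, &settings))
                dev_warn(&pf->pdev->dev, "filter control setup failed\n");
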
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 00000000000..8dbd91f64b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the pf structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if (seid < 0)
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the pf structure to search for the veb
+ * @seid: seid of the veb it is searching for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == seid)
+ return pf->veb[i];
+ return NULL;
+}
+
+/**************************************************************
+ * dump
+ * The dump entry in debugfs is for getting a data snapshot of
+ * the driver's current configuration and runtime details.
+ * When the filesystem entry is written, a snapshot is taken.
+ * When the entry is read, the most recent snapshot data is dumped.
+ **************************************************************/
+static char *i40e_dbg_dump_buf;
+static ssize_t i40e_dbg_dump_data_len;
+static ssize_t i40e_dbg_dump_buffer_len;
+
+/**
+ * i40e_dbg_dump_read - read the dump data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int bytes_not_copied;
+ int len;
+
+ /* is *ppos bigger than the available data? */
+ if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
+ return 0;
+
+ /* be sure to not read beyond the end of available data */
+ len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
+
+ bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+/**
+ * i40e_dbg_prep_dump_buf
+ * @pf: the pf we're working with
+ * @buflen: the desired buffer length
+ *
+ * Returns the allocated buffer length on success, 0 on failure
+ **/
+static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
+{
+ /* if not already big enough, prep for realloc */
+ if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ }
+
+ /* get a new buffer if needed */
+ if (!i40e_dbg_dump_buf) {
+ i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
+ if (i40e_dbg_dump_buf != NULL)
+ i40e_dbg_dump_buffer_len = buflen;
+ }
+
+ return i40e_dbg_dump_buffer_len;
+}
+
+/**
+ * i40e_dbg_dump_write - trigger a datadump snapshot
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ *
+ * A write of a seid triggers a new snapshot of that object's data;
+ * writing 0 frees the snapshot buffer
+ **/
+static ssize_t i40e_dbg_dump_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ char dump_request_buf[16];
+ bool seid_found = false;
+ int bytes_not_copied;
+ long seid = -1;
+ int buflen = 0;
+ int i, ret;
+ int len;
+ u8 *p;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(dump_request_buf))
+ return -ENOSPC;
+
+ bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
+ if (bytes_not_copied)
+ return -EFAULT;
+ dump_request_buf[count] = '\0';
+
+ /* decode the SEID given to be dumped */
+ ret = kstrtol(dump_request_buf, 0, &seid);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
+ dump_request_buf);
+ } else if (seid == 0) {
+ seid_found = true;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_data_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ dev_info(&pf->pdev->dev, "debug buffer freed\n");
+
+ } else if (seid == pf->pf_seid || seid == 1) {
+ seid_found = true;
+
+ buflen = sizeof(struct i40e_pf);
+ buflen += (sizeof(struct i40e_aq_desc)
+ * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+
+ len = sizeof(struct i40e_pf);
+ memcpy(p, pf, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_asq_entries);
+ memcpy(p, pf->hw.aq.asq.desc, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_arq_entries);
+ memcpy(p, pf->hw.aq.arq.desc, len);
+ p += len;
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "PF seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ } else if (seid >= I40E_BASE_VSI_SEID) {
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ int filter_count = 0;
+
+ mutex_lock(&pf->switch_mutex);
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_vsi);
+ buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
+ buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ filter_count++;
+ buflen += sizeof(struct i40e_mac_filter) * filter_count;
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+ seid_found = true;
+
+ len = sizeof(struct i40e_vsi);
+ memcpy(p, vsi, len);
+ p += len;
+
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+
+ len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
+ p += len;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_tx_buffer);
+ memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ p += len;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_rx_buffer);
+ memcpy(p, vsi->rx_rings[i].rx_bi, len);
+ p += len;
+ }
+
+ /* macvlan filter list */
+ len = sizeof(struct i40e_mac_filter);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ memcpy(p, f, len);
+ p += len;
+ }
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VSI seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ } else if (seid >= I40E_BASE_VEB_SEID) {
+ struct i40e_veb *veb = NULL;
+
+ mutex_lock(&pf->switch_mutex);
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_veb);
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ seid_found = true;
+ memcpy(i40e_dbg_dump_buf, veb, buflen);
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VEB seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+write_exit:
+ if (!seid_found)
+ dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
+
+ return count;
+}
+
+static const struct file_operations i40e_dbg_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_dump_read,
+ .write = i40e_dbg_dump_write,
+};
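+
+/*
+ * Example usage of the dump entry (a sketch; assumes debugfs is mounted
+ * at /sys/kernel/debug and uses a hypothetical PF at PCI address
+ * 0000:01:00.0):
+ *
+ *   echo 1 > /sys/kernel/debug/i40e/0000:01:00.0/dump
+ *   cat /sys/kernel/debug/i40e/0000:01:00.0/dump > pf_dump.bin
+ *   echo 0 > /sys/kernel/debug/i40e/0000:01:00.0/dump
+ *
+ * Writing 1 (or the PF's own seid) captures the PF and AdminQ rings,
+ * reading returns the raw structures, and writing 0 frees the buffer.
+ */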
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things. Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
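+
+/*
+ * Example usage (a sketch; assumes debugfs at /sys/kernel/debug and a
+ * hypothetical PF at PCI address 0000:01:00.0; the seid 20 is made up):
+ *
+ *   echo "dump vsi 20" > /sys/kernel/debug/i40e/0000:01:00.0/command
+ *   echo "read 0x10"  > /sys/kernel/debug/i40e/0000:01:00.0/command
+ *
+ * All output goes through dev_info(), i.e. to the kernel log (dmesg),
+ * not back through the file being written.
+ */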
+static char i40e_dbg_command_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_command_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ /* copy_to_user() returns the number of bytes not copied */
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+ struct rtnl_link_stats64 *nstat;
+ struct i40e_mac_filter *f;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "dump %d: seid not found\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+ if (vsi->netdev)
+ dev_info(&pf->pdev->dev,
+ " netdev: name = %s\n",
+ vsi->netdev->name);
+ if (vsi->active_vlans)
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
+ vsi->netdev_registered,
+ vsi->current_netdev_flags, vsi->state, vsi->flags);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ dev_info(&pf->pdev->dev,
+ " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+ f->macaddr, f->vlan, f->is_netdev, f->is_vf,
+ f->counter);
+ }
+ nstat = i40e_get_vsi_stats_struct(vsi);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)nstat->rx_packets,
+ (long unsigned int)nstat->rx_bytes,
+ (long unsigned int)nstat->rx_errors,
+ (long unsigned int)nstat->rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)nstat->tx_packets,
+ (long unsigned int)nstat->tx_bytes,
+ (long unsigned int)nstat->tx_errors,
+ (long unsigned int)nstat->tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)nstat->multicast,
+ (long unsigned int)nstat->collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)nstat->rx_length_errors,
+ (long unsigned int)nstat->rx_over_errors,
+ (long unsigned int)nstat->rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)nstat->rx_frame_errors,
+ (long unsigned int)nstat->rx_fifo_errors,
+ (long unsigned int)nstat->rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)nstat->tx_aborted_errors,
+ (long unsigned int)nstat->tx_carrier_errors,
+ (long unsigned int)nstat->tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)nstat->tx_heartbeat_errors,
+ (long unsigned int)nstat->tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)nstat->rx_compressed,
+ (long unsigned int)nstat->tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_packets,
+ (long unsigned int)vsi->net_stats_offsets.rx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.rx_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_packets,
+ (long unsigned int)vsi->net_stats_offsets.tx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.tx_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.multicast,
+ (long unsigned int)vsi->net_stats_offsets.collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_compressed,
+ (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, vsi->rx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, vsi->rx_rings[i].dev,
+ vsi->rx_rings[i].netdev,
+ vsi->rx_rings[i].rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->rx_rings[i].state,
+ vsi->rx_rings[i].queue_index,
+ vsi->rx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, vsi->rx_rings[i].rx_hdr_len,
+ vsi->rx_rings[i].rx_buf_len,
+ vsi->rx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->rx_rings[i].hsplit,
+ vsi->rx_rings[i].next_to_use,
+ vsi->rx_rings[i].next_to_clean,
+ vsi->rx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, vsi->rx_rings[i].rx_stats.packets,
+ vsi->rx_rings[i].rx_stats.bytes,
+ vsi->rx_rings[i].rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
+ vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->rx_rings[i].size,
+ (long unsigned int)vsi->rx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->rx_rings[i].vsi,
+ vsi->rx_rings[i].q_vector);
+ }
+ }
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, vsi->tx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, vsi->tx_rings[i].dev,
+ vsi->tx_rings[i].netdev,
+ vsi->tx_rings[i].tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->tx_rings[i].state,
+ vsi->tx_rings[i].queue_index,
+ vsi->tx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, vsi->tx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->tx_rings[i].hsplit,
+ vsi->tx_rings[i].next_to_use,
+ vsi->tx_rings[i].next_to_clean,
+ vsi->tx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, vsi->tx_rings[i].tx_stats.packets,
+ vsi->tx_rings[i].tx_stats.bytes,
+ vsi->tx_rings[i].tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
+ i,
+ vsi->tx_rings[i].tx_stats.tx_busy,
+ vsi->tx_rings[i].tx_stats.completed,
+ vsi->tx_rings[i].tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->tx_rings[i].size,
+ (long unsigned int)vsi->tx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->tx_rings[i].vsi,
+ vsi->tx_rings[i].q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, vsi->tx_rings[i].dcb_tc);
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
+ vsi->work_limit, vsi->rx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
+ vsi->tx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+ dev_info(&pf->pdev->dev,
+ " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ if (vsi->q_vectors) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ dev_info(&pf->pdev->dev,
+ " q_vectors[%i]: base index = %ld\n",
+ i, ((long int)*vsi->q_vectors[i].rx.ring-
+ (long int)*vsi->q_vectors[0].rx.ring)/
+ sizeof(struct i40e_ring));
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " num_q_vectors = %i, base_vector = %i\n",
+ vsi->num_q_vectors, vsi->base_vector);
+ dev_info(&pf->pdev->dev,
+ " seid = %d, id = %d, uplink_seid = %d\n",
+ vsi->seid, vsi->id, vsi->uplink_seid);
+ dev_info(&pf->pdev->dev,
+ " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+ dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ dev_info(&pf->pdev->dev,
+ " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+ vsi->info.valid_sections, vsi->info.switch_id);
+ dev_info(&pf->pdev->dev,
+ " info: sw_reserved[] = 0x%02x 0x%02x\n",
+ vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+ dev_info(&pf->pdev->dev,
+ " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+ vsi->info.sec_flags, vsi->info.sec_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+ vsi->info.pvid, vsi->info.fcoe_pvid,
+ vsi->info.port_vlan_flags);
+ dev_info(&pf->pdev->dev,
+ " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+ vsi->info.pvlan_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+ vsi->info.ingress_table, vsi->info.egress_table);
+ dev_info(&pf->pdev->dev,
+ " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
+ vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+ vsi->info.cas_pv_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+ vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+ vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+ vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+ vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+ vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+ vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+ dev_info(&pf->pdev->dev,
+ " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+ vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+ vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+ vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.queueing_opt_flags,
+ vsi->info.queueing_opt_reserved[0],
+ vsi->info.queueing_opt_reserved[1],
+ vsi->info.queueing_opt_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: up_enable_bits = 0x%02x\n",
+ vsi->info.up_enable_bits);
+ dev_info(&pf->pdev->dev,
+ " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+ vsi->info.sched_reserved, vsi->info.outer_up_table);
+ dev_info(&pf->pdev->dev,
+ " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
+ vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+ vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+ vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+ vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+ dev_info(&pf->pdev->dev,
+ " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+ vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+ vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+ vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+ dev_info(&pf->pdev->dev,
+ " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+ vsi->info.stat_counter_idx, vsi->info.sched_id);
+ dev_info(&pf->pdev->dev,
+ " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+ vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+ vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+ vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+ vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+ vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+ if (vsi->back)
+ dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
+ dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
+ dev_info(&pf->pdev->dev,
+ " tc_config: numtc = %d, enabled_tc = 0x%x\n",
+ vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+ i, vsi->tc_config.tc_info[i].qoffset,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].netdev_tc);
+ }
+ dev_info(&pf->pdev->dev,
+ " bw: bw_limit = %d, bw_max_quanta = %d\n",
+ vsi->bw_limit, vsi->bw_max_quanta);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+ i, vsi->bw_ets_share_credits[i],
+ vsi->bw_ets_limit_credits[i],
+ vsi->bw_ets_max_quanta[i]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+ struct i40e_adminq_ring *ring;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* first the send (command) ring, then the receive (event) ring */
+ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+ ring = &(hw->aq.asq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+
+ dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+ ring = &(hw->aq.arq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @is_rx_ring: true if rx, false if tx
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ struct i40e_pf *pf, bool is_rx_ring)
+{
+ union i40e_rx_desc *ds;
+ struct i40e_ring ring;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d not found\n", vsi_seid);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+ dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (is_rx_ring)
+ ring = vsi->rx_rings[ring_id];
+ else
+ ring = vsi->tx_rings[ring_id];
+ if (cnt == 2) {
+ dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+ for (i = 0; i < ring.count; i++) {
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, i);
+ else
+ ds = (union i40e_rx_desc *)
+ I40E_TX_DESC(&ring, i);
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n", i,
+ ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ i, ds->read.pkt_addr,
+ ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ }
+ } else if (cnt == 3) {
+ if (desc_n >= ring.count || desc_n < 0) {
+ dev_info(&pf->pdev->dev,
+ "descriptor %d not found\n", desc_n);
+ return;
+ }
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, desc_n);
+ else
+ ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ } else {
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ }
+}
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+ i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+ struct i40e_eth_stats *estats)
+{
+ dev_info(&pf->pdev->dev, " ethstats:\n");
+ dev_info(&pf->pdev->dev,
+ " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+ estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+ dev_info(&pf->pdev->dev,
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
+ estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ dev_info(&pf->pdev->dev,
+ " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_missed, estats->rx_unknown_protocol,
+ estats->tx_bytes);
+ dev_info(&pf->pdev->dev,
+ " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+ estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+ dev_info(&pf->pdev->dev,
+ " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+ estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @stats: the stats structure to be dumped
+ **/
+static void i40e_dbg_dump_stats(struct i40e_pf *pf,
+ struct i40e_hw_port_stats *stats)
+{
+ int i;
+
+ dev_info(&pf->pdev->dev, " stats:\n");
+ dev_info(&pf->pdev->dev,
+ " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
+ stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
+ dev_info(&pf->pdev->dev,
+ " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
+ stats->mac_local_faults, stats->mac_remote_faults,
+ stats->rx_length_errors);
+ dev_info(&pf->pdev->dev,
+ " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
+ stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
+ dev_info(&pf->pdev->dev,
+ " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
+ stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
+ dev_info(&pf->pdev->dev,
+ " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
+ stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
+ dev_info(&pf->pdev->dev,
+ " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
+ stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
+ dev_info(&pf->pdev->dev,
+ " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
+ stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
+ dev_info(&pf->pdev->dev,
+ " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
+ stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
+ dev_info(&pf->pdev->dev,
+ " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
+ stats->tx_size_1023, stats->tx_size_big,
+ stats->mac_short_packet_dropped);
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_rx[i],
+ i+1, stats->priority_xon_rx[i+1],
+ i+2, stats->priority_xon_rx[i+2],
+ i+3, stats->priority_xon_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_rx[i],
+ i+1, stats->priority_xoff_rx[i+1],
+ i+2, stats->priority_xoff_rx[i+2],
+ i+3, stats->priority_xoff_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_tx[i],
+ i+1, stats->priority_xon_tx[i+1],
+ i+2, stats->priority_xon_tx[i+2],
+ i+3, stats->priority_xon_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_tx[i],
+ i+1, stats->priority_xoff_tx[i+1],
+ i+2, stats->priority_xoff_tx[i+2],
+ i+3, stats->priority_xoff_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_2_xoff[i],
+ i+1, stats->priority_xon_2_xoff[i+1],
+ i+2, stats->priority_xon_2_xoff[i+2],
+ i+3, stats->priority_xon_2_xoff[i+3]);
+ }
+
+ i40e_dbg_dump_eth_stats(pf, &stats->eth);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+ struct i40e_veb *veb;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ return;
+ }
+
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "%d: can't find veb\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev,
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
+ veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+ veb->uplink_seid);
+ i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps stats for all known vebs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ veb = pf->veb[i];
+ if (veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
+ }
+}
+
+#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ u8 *print_buf_start;
+ u8 *print_buf;
+ char *cmd_buf;
+ int vsi_seid;
+ int veb_seid;
+ int cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+ cmd_buf[count] = '\0';
+
+ print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
+ if (!print_buf_start)
+ goto command_write_done;
+ print_buf = print_buf_start;
+
+ if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+ vsi_seid = -1;
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt == 0) {
+ /* default to PF VSI */
+ vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ } else if (vsi_seid < 0) {
+ dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+ if (vsi)
+ dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+ vsi->seid, vsi->uplink_seid);
+ else
+ dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+ } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+ sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+ i40e_vsi_release(vsi);
+
+ } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+ struct i40e_veb *veb;
+ int uplink_seid, i;
+
+ cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (uplink_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "add relay %d: bad uplink seid\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: vsi VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+ break;
+ if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+ uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: relay uplink %d not found\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+ vsi->tc_config.enabled_tc);
+ if (veb)
+ dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+ else
+ dev_info(&pf->pdev->dev, "add relay failed\n");
+
+ } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ int i;
+ cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (veb_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "del relay %d: bad relay seid\n", veb_seid);
+ goto command_write_done;
+ }
+
+ /* find the veb */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ break;
+ if (i >= I40E_MAX_VEB) {
+ dev_info(&pf->pdev->dev,
+ "del relay: relay %d not found\n", veb_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ i40e_veb_release(pf->veb[i]);
+
+ } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ struct i40e_mac_filter *f;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ f = i40e_add_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (f && !ret)
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d added to VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
+ ma, vlan, vsi_seid, f, ret);
+
+ } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_del_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d removed from VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
+ ma, vlan, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+ int v;
+ u16 vid;
+ i40e_status ret;
+
+ cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add pvid: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vid = (unsigned)v;
+ ret = i40e_vsi_add_pvid(vsi, vid);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d added to VSI %d\n",
+ vid, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d to VSI %d failed, ret=%d\n",
+ vid, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_vsi_remove_pvid(vsi);
+ dev_info(&pf->pdev->dev,
+ "del pvid: removed from VSI %d\n", vsi_seid);
+
+ } else if (strncmp(cmd_buf, "dump", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+ i40e_fetch_switch_configuration(pf, true);
+ } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_vsi_no_seid(pf);
+ } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_veb_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+ int ring_id, desc_n;
+ if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, true);
+ } else if (strncmp(&cmd_buf[10], "tx", 2)
+ == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, false);
+ } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+ i40e_dbg_dump_aq_desc(pf);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc aq\n");
+ }
+ } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
+ dev_info(&pf->pdev->dev, "pf stats:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats);
+ dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats_offsets);
+ } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+ dev_info(&pf->pdev->dev,
+ "core reset count: %d\n", pf->corer_count);
+ dev_info(&pf->pdev->dev,
+ "global reset count: %d\n", pf->globr_count);
+ dev_info(&pf->pdev->dev,
+ "emp reset count: %d\n", pf->empr_count);
+ dev_info(&pf->pdev->dev,
+ "pf reset count: %d\n", pf->pfr_count);
+ } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+ struct i40e_aqc_query_port_ets_config_resp *bw_data;
+ struct i40e_dcbx_config *cfg =
+ &pf->hw.local_dcbx_config;
+ struct i40e_dcbx_config *r_cfg =
+ &pf->hw.remote_dcbx_config;
+ int i, ret;
+
+ bw_data = kzalloc(sizeof(
+ struct i40e_aqc_query_port_ets_config_resp),
+ GFP_KERNEL);
+ if (!bw_data) {
+ ret = -ENOMEM;
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_query_port_ets_config(&pf->hw,
+ pf->mac_seid,
+ bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Query Port ETS Config AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(bw_data);
+ bw_data = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+ bw_data->tc_valid_bits,
+ bw_data->tc_strict_priority_bits,
+ le16_to_cpu(bw_data->tc_bw_max[0]),
+ le16_to_cpu(bw_data->tc_bw_max[1]));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+ bw_data->tc_bw_share_credits[i],
+ le16_to_cpu(bw_data->tc_bw_limits[i]));
+ }
+
+ kfree(bw_data);
+ bw_data = NULL;
+
+ dev_info(&pf->pdev->dev,
+ "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ cfg->etscfg.willing, cfg->etscfg.cbs,
+ cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etscfg.prioritytable[i],
+ cfg->etscfg.tcbwtable[i],
+ cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etsrec.prioritytable[i],
+ cfg->etsrec.tcbwtable[i],
+ cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ cfg->pfc.willing, cfg->pfc.mbc,
+ cfg->pfc.pfccap, cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "port app_table: num_apps=%d\n", cfg->numapps);
+ for (i = 0; i < cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, cfg->app[i].priority,
+ cfg->app[i].selector,
+ cfg->app[i].protocolid);
+ }
+ /* Peer TLV DCBX data */
+ dev_info(&pf->pdev->dev,
+ "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ r_cfg->etscfg.willing,
+ r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etscfg.prioritytable[i],
+ r_cfg->etscfg.tcbwtable[i],
+ r_cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etsrec.prioritytable[i],
+ r_cfg->etsrec.tcbwtable[i],
+ r_cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ r_cfg->pfc.willing,
+ r_cfg->pfc.mbc,
+ r_cfg->pfc.pfccap,
+ r_cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "remote port app_table: num_apps=%d\n",
+ r_cfg->numapps);
+ for (i = 0; i < r_cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, r_cfg->app[i].priority,
+ r_cfg->app[i].selector,
+ r_cfg->app[i].protocolid);
+ }
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+ dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
+ dev_info(&pf->pdev->dev, "dump stats\n");
+ dev_info(&pf->pdev->dev, "dump reset stats\n");
+ dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ }
+
+ } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+ u32 level;
+ cnt = sscanf(&cmd_buf[10], "%i", &level);
+ if (cnt) {
+ if (I40E_DEBUG_USER & level) {
+ pf->hw.debug_mask = level;
+ dev_info(&pf->pdev->dev,
+ "set hw.debug_mask = 0x%08x\n",
+ pf->hw.debug_mask);
+ }
+ pf->msg_enable = level;
+ dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ } else {
+ dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ }
+ } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+ dev_info(&pf->pdev->dev, "forcing PFR\n");
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "corer", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing CoreR\n");
+ i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "globr", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing GlobR\n");
+ i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "read", 4) == 0) {
+ u32 address;
+ u32 value;
+ cnt = sscanf(&cmd_buf[4], "%x", &address);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "read <reg>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+ address, value);
+
+ } else if (strncmp(cmd_buf, "write", 5) == 0) {
+ u32 address, value;
+ cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+ wr32(&pf->hw, address, value);
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+ address, value);
+ } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+ if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ if (cnt == 0) {
+ int i;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ i40e_vsi_reset_stats(pf->vsi[i]);
+ dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+ } else if (cnt == 1) {
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "clear_stats vsi: bad vsi %d\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ i40e_vsi_reset_stats(vsi);
+ dev_info(&pf->pdev->dev,
+ "vsi clear stats called for vsi %d\n",
+ vsi_seid);
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+ }
+ } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "pf clear stats called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+ }
+ } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
+ (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
+ struct i40e_fdir_data fd_data;
+ int ret;
+ u16 packet_len, i, j = 0;
+ char *asc_packet;
+ bool add = false;
+
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+ if (!asc_packet)
+ goto command_write_done;
+
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ kfree(asc_packet);
+ asc_packet = NULL;
+ goto command_write_done;
+ }
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
+ cnt = sscanf(&cmd_buf[13],
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ &fd_data.q_index,
+ &fd_data.flex_off, &fd_data.pctype,
+ &fd_data.dest_vsi, &fd_data.dest_ctl,
+ &fd_data.fd_status, &fd_data.cnt_index,
+ &fd_data.fd_id, &packet_len, asc_packet);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "program fd_filter: bad command string, cnt=%d\n",
+ cnt);
+ kfree(asc_packet);
+ asc_packet = NULL;
+ kfree(fd_data.raw_packet);
+ goto command_write_done;
+ }
+
+ /* fix packet length if user entered 0 */
+ if (packet_len == 0)
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+
+ /* make sure to check the max as well */
+ packet_len = min_t(u16,
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+
+ dev_info(&pf->pdev->dev, "FD raw packet:\n");
+ for (i = 0; i < packet_len; i++) {
+ sscanf(&asc_packet[j], "%2hhx ",
+ &fd_data.raw_packet[i]);
+ j += 3;
+ /* leave room for snprintf's trailing NUL */
+ snprintf(print_buf, 4, "%02x ", fd_data.raw_packet[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+ snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed %d\n", ret);
+ }
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+ kfree(asc_packet);
+ asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+ int ret;
+ ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Stop LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+ int ret;
+ ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Start LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5],
+ "get local", 9) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+ I40E_AQ_LLDP_MIB_LOCAL,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+ snprintf(print_buf, 4, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+ snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ I40E_AQ_LLDP_MIB_REMOTE,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+ snprintf(print_buf, 4, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+ snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ }
+ } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+ u16 buffer_len, i, bytes;
+ u16 module;
+ u32 offset;
+ u16 *buff;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+ &module, &offset, &buffer_len);
+ if (cnt == 0) {
+ module = 0;
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 1) {
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 2) {
+ buffer_len = 0;
+ } else if (cnt > 3) {
+ dev_info(&pf->pdev->dev,
+ "nvm read: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ /* Read at least 512 words */
+ if (buffer_len == 0)
+ buffer_len = 512;
+
+ bytes = 2 * buffer_len;
+ buff = kzalloc(bytes, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+ bytes, (u8 *)buff, true, NULL);
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Read NVM AQ failed err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Read NVM module=0x%x offset=0x%x words=%d\n",
+ module, offset, buffer_len);
+ for (i = 0; i < buffer_len; i++) {
+ if ((i % 16) == 0) {
+ snprintf(print_buf, 14, "\n0x%08x: ",
+ offset + i);
+ print_buf += 13;
+ }
+ snprintf(print_buf, 6, "%04x ", buff[i]);
+ print_buf += 5;
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ }
+ kfree(buff);
+ buff = NULL;
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
+ dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
+ dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
+ dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
+ dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " dump switch\n");
+ dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc aq\n");
+ dev_info(&pf->pdev->dev, " dump stats\n");
+ dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " msg_enable [level]\n");
+ dev_info(&pf->pdev->dev, " read <reg>\n");
+ dev_info(&pf->pdev->dev, " write <reg> <value>\n");
+ dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " clear_stats pf\n");
+ dev_info(&pf->pdev->dev, " pfr\n");
+ dev_info(&pf->pdev->dev, " corer\n");
+ dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " lldp start\n");
+ dev_info(&pf->pdev->dev, " lldp stop\n");
+ dev_info(&pf->pdev->dev, " lldp get local\n");
+ dev_info(&pf->pdev->dev, " lldp get remote\n");
+ dev_info(&pf->pdev->dev, " lldp event on\n");
+ dev_info(&pf->pdev->dev, " lldp event off\n");
+ dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
+ }
+
+command_write_done:
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+ kfree(print_buf_start);
+ print_buf = NULL;
+ print_buf_start = NULL;
+ return count;
+}
+
+static const struct file_operations i40e_dbg_command_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_command_read,
+ .write = i40e_dbg_command_write,
+};
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs is for giving the driver commands
+ * to be executed from the netdev operations.
+ **************************************************************/
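+
+/*
+ * Example usage (a sketch; the vsi seid 20 and the PCI address are
+ * hypothetical):
+ *
+ *   echo "napi 20" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
+ *   echo "change_mtu 20 1500" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
+ *
+ * tx_timeout, change_mtu and set_rx_mode call the netdev op under
+ * rtnl_trylock(); napi schedules every q_vector's NAPI poll directly.
+ */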
+static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_netdev_ops_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ int vsi_seid;
+ int i, cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+ return -ENOSPC;
+
+ memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+ bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+ buffer, count);
+ if (bytes_not_copied)
+ return -EFAULT;
+ i40e_dbg_netdev_ops_buf[count] = '\0';
+
+ if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "tx_timeout: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "tx_timeout called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ int mtu;
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ &vsi_seid, &mtu);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "change_mtu: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+ mtu);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "change_mtu called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "set_rx_mode: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+ vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i].napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+ i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
+ dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
+ }
+netdev_ops_write_done:
+ return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_netdev_ops_read,
+ .write = i40e_dbg_netdev_ops_write,
+};
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the pf
+ * @pf: the pf that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+ struct dentry *pfile __attribute__((unused));
+ const char *name = pci_name(pf->pdev);
+
+ pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+ if (pf->i40e_dbg_pf) {
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_command_fops);
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_netdev_ops_fops);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "debugfs entry for %s failed\n", name);
+ }
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the pf's debugfs entries
+ * @pf: the pf that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ pf->i40e_dbg_pf = NULL;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+ i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+ if (!i40e_dbg_root)
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+ debugfs_remove_recursive(i40e_dbg_root);
+ i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 00000000000..de255143bde
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+ __func__, reg, pat, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+ __func__, reg, orig_val, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return 0;
+}
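+
+/* Worked example of the masked compare above: for mask 0x00000FFF, the
+ * pattern 0x5A5A5A5A is written as 0x00000A5A (pat & mask); if the low
+ * 12 bits read back differently, a bit is stuck and the test fails.
+ * Bits outside the mask are ignored on compare, as they may be
+ * reserved or self-clearing.
+ */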
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
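+
+/* Each row expands to `elements` consecutive registers: element j is
+ * probed at offset + j * stride.  The I40E_QTX_CTL row, for example,
+ * covers I40E_QTX_CTL(0) through I40E_QTX_CTL(63), with the stride
+ * taken from the spacing of adjacent array members.
+ */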
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ reg = i40e_reg_list[i].offset +
+ (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ u16 reg_val;
+
+ /* read NVM control word and, if the NVM is valid, validate EEPROM checksum */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((!ret_code) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
+ ret_code = i40e_validate_nvm_checksum(hw, NULL);
+ } else {
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 00000000000..3d98277f452
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0,
+ I40E_LB_MODE_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 00000000000..9a76b8cec76
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,1449 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+
+struct i40e_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define I40E_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+#define I40E_NETDEV_STAT(_net_stat) \
+ I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#define I40E_PF_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_vsi, _name, _stat)
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+ I40E_NETDEV_STAT(rx_packets),
+ I40E_NETDEV_STAT(tx_packets),
+ I40E_NETDEV_STAT(rx_bytes),
+ I40E_NETDEV_STAT(tx_bytes),
+ I40E_NETDEV_STAT(rx_errors),
+ I40E_NETDEV_STAT(tx_errors),
+ I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(tx_dropped),
+ I40E_NETDEV_STAT(multicast),
+ I40E_NETDEV_STAT(collisions),
+ I40E_NETDEV_STAT(rx_length_errors),
+ I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate. This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port. The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static struct i40e_stats i40e_gstrings_stats[] = {
+ I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
+ I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("crc_errors", stats.crc_errors),
+ I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+};
+
+#define I40E_QUEUE_STATS_LEN(n) \
+ ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
+ ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_QUEUE_STATS_LEN((n)))
+#define I40E_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
+ / sizeof(u64))
+#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
+ I40E_PFC_STATS_LEN + \
+ I40E_VSI_STATS_LEN((n)))
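+
+/* Illustrative count composition (assuming I40E_MAX_USER_PRIORITY is 8,
+ * per the priority_x* arrays): a non-main VSI netdev exposes
+ * I40E_NETDEV_STATS_LEN entries plus four per queue pair (tx/rx packets
+ * and bytes); the main LAN VSI additionally reports
+ * I40E_GLOBAL_STATS_LEN port stats and 5 * 8 = 40 PFC counters, which
+ * is what I40E_PFC_STATS_LEN computes via the FIELD_SIZEOF sums above.
+ */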
+
+enum i40e_ethtool_test_id {
+ I40E_ETH_TEST_REG = 0,
+ I40E_ETH_TEST_EEPROM,
+ I40E_ETH_TEST_INTR,
+ I40E_ETH_TEST_LOOPBACK,
+ I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Eeprom test (offline)",
+ "Interrupt test (offline)",
+ "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 link_speed = hw_link_info->link_speed;
+
+ /* hardware is either in 40G mode or 10G mode
+ * NOTE: this section initializes supported and advertising
+ */
+ switch (hw_link_info->phy_type) {
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ecmd->supported = SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseLR4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ecmd->supported = SUPPORTED_10000baseKX4_Full;
+ ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ecmd->supported = SUPPORTED_10000baseKR_Full;
+ ecmd->advertising = ADVERTISED_10000baseKR_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ default:
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ break;
+ }
+
+ /* for now just say autoneg all the time */
+ ecmd->supported |= SUPPORTED_Autoneg;
+
+ if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ ecmd->supported |= SUPPORTED_Backplane;
+ ecmd->advertising |= ADVERTISED_Backplane;
+ ecmd->port = PORT_NONE;
+ } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (link_up) {
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ /* need a SPEED_40000 in ethtool.h */
+ ethtool_cmd_speed_set(ecmd, 40000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethtool pause parameters, filled in with the tx/rx-pause status
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ pause->autoneg =
+ ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ pause->rx_pause = 1;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
+ pause->tx_pause = 1;
+}
+
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ return pf->msg_enable;
+}
+
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (I40E_DEBUG_USER & data)
+ pf->hw.debug_mask = data;
+ pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+ int reg_count = 0;
+ int i;
+
+ for (i = 0; i40e_reg_list[i].offset != 0; i++)
+ reg_count += i40e_reg_list[i].elements;
+
+ return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 *reg_buf = p;
+ int i, j, ri;
+ u32 reg;
+
+ /* Tell ethtool which driver-version-specific regs output we have.
+ *
+ * At some point, if we have ethtool doing special formatting of
+ * this data, it will rely on this version number to know how to
+ * interpret things. Hence, this needs to be updated if/when the
+ * diags register table is changed.
+ */
+ regs->version = 1;
+
+ /* loop through the diags reg table for what to print */
+ ri = 0;
+ for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+ for (j = 0; j < i40e_reg_list[i].elements; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ reg_buf[ri++] = rd32(hw, reg);
+ }
+ }
+}
+
+static int i40e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ int first_word, last_word;
+ u16 i, eeprom_len;
+ u16 *eeprom_buff;
+ int ret_val = 0;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_len = last_word - first_word + 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
+ eeprom_buff);
+ if (eeprom_len == 0) {
+ kfree(eeprom_buff);
+ return -EACCES;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < eeprom_len; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+
+ return hw->nvm.sr_size * 2;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, i40e_driver_version_str,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void i40e_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = vsi->rx_rings[0].count;
+ ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == vsi->tx_rings[0].count) &&
+ (new_rx_count == vsi->rx_rings[0].count))
+ return 0;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(vsi->netdev)) {
+ /* simple case - set for the next time the netdev is started */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ vsi->tx_rings[i].count = new_tx_count;
+ vsi->rx_rings[i].count = new_rx_count;
+ }
+ goto done;
+ }
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+
+ /* alloc updated Tx resources */
+ if (new_tx_count != vsi->tx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Tx descriptor count from %d to %d.\n",
+ vsi->tx_rings[0].count, new_tx_count);
+ tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!tx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i].count = new_tx_count;
+ err = i40e_setup_tx_descriptors(&tx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_tx_resources(&tx_rings[i]);
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+
+ goto done;
+ }
+ }
+ }
+
+ /* alloc updated Rx resources */
+ if (new_rx_count != vsi->rx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Rx descriptor count from %d to %d\n",
+ vsi->rx_rings[0].count, new_rx_count);
+ rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i].count = new_rx_count;
+ err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_rx_resources(&rx_rings[i]);
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+
+ goto free_tx;
+ }
+ }
+ }
+
+ /* Bring interface down, copy in the new ring info,
+ * then restore the interface
+ */
+ i40e_down(vsi);
+
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+ vsi->tx_rings[i] = tx_rings[i];
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+ if (rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+ vsi->rx_rings[i] = rx_rings[i];
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+ }
+
+ i40e_up(vsi);
+
+free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_free_tx_resources(&tx_rings[i]);
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+done:
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+
+ return err;
+}
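+
+/* Example (illustrative; "eth0" is a placeholder): descriptor counts
+ * are typically changed with `ethtool -G eth0 rx 1024 tx 1024`; the
+ * request is clamped to [I40E_MIN_NUM_DESCRIPTORS,
+ * I40E_MAX_NUM_DESCRIPTORS] and rounded to a multiple of
+ * I40E_REQ_DESCRIPTOR_MULTIPLE before it is applied.
+ */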
+
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return I40E_TEST_LEN;
+ case ETH_SS_STATS:
+ if (vsi == pf->vsi[pf->lan_vsi])
+ return I40E_PF_STATS_LEN(netdev);
+ else
+ return I40E_VSI_STATS_LEN(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i = 0;
+ char *p;
+ int j;
+ struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+
+ i40e_update_stats(vsi);
+
+ for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
+ p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->tx_rings[j].tx_stats.packets;
+ data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->rx_rings[j].rx_stats.packets;
+ data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
+ }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < I40E_TEST_LEN; i++) {
+ memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_net_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static int i40e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+{
+ if (i40e_get_link_status(&pf->hw))
+ *data = 0;
+ else
+ *data = 1;
+
+ return *data;
+}
+
+static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_reg_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_eeprom_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ set_bit(__I40E_TESTING, &pf->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ netdev_info(netdev, "offline testing starting\n");
+
+ /* Link test performed before hardware reset
+ * so autoneg doesn't interfere with test result
+ */
+ netdev_info(netdev, "link test starting\n");
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ netdev_info(netdev, "register test starting\n");
+ if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "eeprom test starting\n");
+ if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "interrupt test starting\n");
+ if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "loopback test starting\n");
+ if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ } else {
+ netdev_info(netdev, "online test starting\n");
+ /* Online tests */
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline only tests, not run in online; pass by default */
+ data[I40E_ETH_TEST_REG] = 0;
+ data[I40E_ETH_TEST_EEPROM] = 0;
+ data[I40E_ETH_TEST_INTR] = 0;
+ data[I40E_ETH_TEST_LOOPBACK] = 0;
+	}
+
+	/* testing is finished, whether offline or online */
+	clear_bit(__I40E_TESTING, &pf->state);
+}
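+
+/* Example (illustrative; "eth0" is a placeholder): the self-test runs
+ * via `ethtool -t eth0 offline` or `ethtool -t eth0 online`.  Offline
+ * executes the full list above, resetting the PF between tests; online
+ * runs only the link test and reports the offline-only tests as passed.
+ */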
+
+static void i40e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+ /* restart autonegotiation */
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret = 0;
+
+ ret = i40e_aq_set_link_restart_an(hw, NULL);
+ if (ret) {
+ netdev_info(netdev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int i40e_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int blink_freq = 2;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ pf->led_status = i40e_led_get(hw);
+ return blink_freq;
+ case ETHTOOL_ID_ON:
+ i40e_led_set(hw, 0xF);
+ break;
+ case ETHTOOL_ID_OFF:
+ i40e_led_set(hw, 0x0);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ i40e_led_set(hw, pf->led_status);
+ break;
+ }
+
+ return 0;
+}
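+
+/* Example (illustrative; "eth0" is a placeholder): `ethtool -p eth0 10`
+ * blinks the port LED for ten seconds.  The value 2 returned from
+ * ETHTOOL_ID_ACTIVE asks the ethtool core to alternate ID_ON/ID_OFF
+ * that many times per second; ID_INACTIVE restores the LED state saved
+ * at activation.
+ */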
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2us and ITR(10) = 20us; likewise
+ * 125us (8000 interrupts per second) == ITR(62).
+ */
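+
+/* Worked example of the factor-of-2 conversion (assuming ITR_TO_REG()
+ * halves the microsecond value and ITR_REG_TO_USEC() doubles the
+ * register value, per the note above): `rx-usecs 50` stores 50 in
+ * rx_itr_setting and programs ITR(25); 125us likewise maps to ITR(62).
+ */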
+
+static int i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+ ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_q_vector *q_vector;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
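+
+/* Example (illustrative; "eth0" is a placeholder): per the switch
+ * statements above, `ethtool -C eth0 rx-usecs 1` selects dynamic ITR,
+ * `rx-usecs 0` disables throttling entirely, and any other in-range
+ * value programs a fixed interrupt interval in microseconds.
+ */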
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on i40e */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
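+
+/* Example (illustrative; "eth0" is a placeholder): these defaults are
+ * reported by `ethtool -n eth0 rx-flow-hash tcp4`, which lists the
+ * hash fields roughly as "IP SA", "IP DA", "L4 bytes 0 & 1" and
+ * "L4 bytes 2 & 3", i.e. src/dst address plus src/dst port.
+ */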
+
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vsi->alloc_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40e_get_rss_hash_opts(pf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = 500;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
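+
+/* Example (illustrative; "eth0" is a placeholder): the input set uses
+ * ethtool's s/d/f/n field letters, e.g.
+ *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
+ * hashes TCP/IPv4 flows on src IP, dst IP and both L4 port halves,
+ * matching the (RXH_L4_B_0_1 | RXH_L4_B_2_3) case handled above.
+ */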
+
+#define IP_HEADER_OFFSET 14
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ udp->source = fsp->h_u.tcp_ip4_spec.psrc;
+ udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
+ ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
+ * a specific flow spec based on their protocol
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd, bool add)
+{
+ struct i40e_fdir_data fd_data;
+ int ret = -EINVAL;
+ struct i40e_pf *pf;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
+ return -EINVAL;
+
+ /* Populate the Flow Director data we have at the moment
+ * and allocate the raw packet buffer for the helper functions
+ */
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ dev_info(&pf->pdev->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ fd_data.q_index = fsp->ring_cookie;
+ fd_data.flex_off = 0;
+ fd_data.pctype = 0;
+ fd_data.dest_vsi = vsi->id;
+ fd_data.dest_ctl = 0;
+ fd_data.fd_status = 0;
+ fd_data.cnt_index = 0;
+ fd_data.fd_id = 0;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Could not specify spec type\n");
+ ret = -EINVAL;
+ }
+
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+
+ return ret;
+}
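+
+/* Example (illustrative; "eth0", the address and the queue index are
+ * placeholders): an ntuple rule such as
+ *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.10.2 dst-port 80 action 1
+ * arrives here as ETHTOOL_SRXCLSRLINS and is dispatched to
+ * i40e_add_del_fdir_tcpv4(); the `action` value becomes the destination
+ * queue index, validated against vsi->num_queue_pairs above.
+ */
+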
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40e_set_rss_hash_opt(pf, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+ .get_settings = i40e_get_settings,
+ .get_drvinfo = i40e_get_drvinfo,
+ .get_regs_len = i40e_get_regs_len,
+ .get_regs = i40e_get_regs,
+ .nway_reset = i40e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_wol = i40e_get_wol,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+ .get_ringparam = i40e_get_ringparam,
+ .set_ringparam = i40e_set_ringparam,
+ .get_pauseparam = i40e_get_pauseparam,
+ .get_msglevel = i40e_get_msglevel,
+ .set_msglevel = i40e_set_msglevel,
+ .get_rxnfc = i40e_get_rxnfc,
+ .set_rxnfc = i40e_set_rxnfc,
+ .self_test = i40e_diag_test,
+ .get_strings = i40e_get_strings,
+ .set_phys_id = i40e_set_phys_id,
+ .get_sset_count = i40e_get_sset_count,
+ .get_ethtool_stats = i40e_get_ethtool_stats,
+ .get_coalesce = i40e_get_coalesce,
+ .set_coalesce = i40e_set_coalesce,
+ .get_ts_info = i40e_get_ts_info,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 00000000000..901804af8b0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,366 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_memory_type mem_type __attribute__((unused));
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code && dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed
+ * 2. It should be 4K in size.
+ **/
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+
+ memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = mem.pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ memcpy(pd_addr, &page_desc, sizeof(u64));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
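+
+/* Index arithmetic sketch (assuming I40E_HMC_PD_CNT_IN_SD is 512, as
+ * the 512-entry pd_entry allocation in i40e_add_sd_table_entry()
+ * suggests): pd_index 1000 resolves to sd_idx = 1000 / 512 = 1 and
+ * rel_pd_idx = 1000 % 512 = 488, so the 4K backing page's address is
+ * written to u64 slot 488 of that segment's pd page.
+ */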
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ * sd table (for direct address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (is_pf)
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+ else
+ I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ i40e_status ret_code = 0;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 00000000000..aacd42a261e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equal to the PCI func num for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
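
These counters nest: every user of a backing page bumps bp.ref_cnt, every valid pd entry bumps its pd_table.ref_cnt, and every in-use sd entry bumps sd_table.ref_cnt; the remove paths above only walk up a level once the level below hits zero. A toy standalone sketch of that gating, with plain ints standing in for the driver structures:

#include <stdio.h>

/* toy stand-ins for bp.ref_cnt, pd_table.ref_cnt and sd_table.ref_cnt */
static int bp_refs = 2, pd_refs = 1, sd_refs = 1;

static void remove_pd_bp(void)
{
	if (--bp_refs)		/* mirrors I40E_DEC_BP_REFCNT + early exit */
		return;
	pd_refs--;		/* last user gone: drop the pd table's count */
}

int main(void)
{
	remove_pd_bp();		/* still referenced, nothing freed */
	remove_pd_bp();		/* backing page can now be released */
	if (!pd_refs)
		sd_refs--;	/* analogous to the prep_remove_* helpers */
	printf("bp=%d pd=%d sd=%d\n", bp_refs, pd_refs, sd_refs);
	return 0;
}
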
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: whether the sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the one-past-the-last sd index for the range
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return the one-past-the-last pd index for the range
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
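
Both macros are plain FPM address arithmetic: the object byte range [base + size*idx, base + size*(idx+cnt)) is mapped onto 2M segment descriptors or 4K page descriptors, with the final "+ 1" turning an inclusive last index into an exclusive limit. A standalone rework of the same math with sample numbers (base 0 and 128-byte entries, roughly a Tx queue context layout):

#include <stdint.h>
#include <stdio.h>

#define DIRECT_BP_SIZE 0x200000ULL	/* as I40E_HMC_DIRECT_BP_SIZE */
#define PAGED_BP_SIZE  4096ULL		/* as I40E_HMC_PAGED_BP_SIZE */

int main(void)
{
	uint64_t base = 0, size = 128;	/* sample object layout */
	uint64_t index = 0, cnt = 1536;

	uint64_t fpm_addr = base + size * index;
	uint64_t fpm_limit = fpm_addr + size * cnt;	/* 196608 bytes */

	uint32_t sd_idx = fpm_addr / DIRECT_BP_SIZE;
	uint32_t sd_limit = (fpm_limit - 1) / DIRECT_BP_SIZE + 1;	/* 1 */
	uint32_t pd_idx = fpm_addr / PAGED_BP_SIZE;
	uint32_t pd_limit = (fpm_limit - 1) / PAGED_BP_SIZE + 1;	/* 48 */

	printf("sd [%u,%u) pd [%u,%u)\n", (unsigned)sd_idx, (unsigned)sd_limit,
	       (unsigned)pd_idx, (unsigned)pd_limit);
	return 0;
}
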
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 00000000000..a695b91c9c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1006 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
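
The alignment helper is the usual round-up-to-multiple idiom (equivalent to the kernel's ALIGN(offset, 512)), and the size calculation just stacks the four aligned object regions on top of each other. A quick standalone check with made-up queue and filter counts:

#include <stdint.h>
#include <stdio.h>

#define L2OBJ_ALIGN 512ULL	/* I40E_HMC_L2OBJ_BASE_ALIGNMENT */

static uint64_t align_l2obj(uint64_t off)
{
	return (off + L2OBJ_ALIGN - 1) / L2OBJ_ALIGN * L2OBJ_ALIGN;
}

int main(void)
{
	/* hypothetical counts; per-object sizes from i40e_lan_hmc.h */
	uint64_t sz = align_l2obj(1536 * 128);	/* Tx queue contexts */

	sz = align_l2obj(sz + 1536 * 32);	/* Rx queue contexts */
	sz = align_l2obj(sz + 128 * 128);	/* FCoE contexts */
	sz = align_l2obj(sz + 128 * 32);	/* FCoE filters */
	printf("l2fpm size: %llu bytes\n", (unsigned long long)sz);
	return 0;
}
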
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ i40e_status ret_code = 0;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
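
The sd_cnt computed above is a ceiling division of the total FPM footprint by the 2M direct backing-page size, so a partially used last SD still gets an entry. A one-off standalone demonstration of that rounding (the footprint value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bp_size = 0x200000;	/* I40E_HMC_DIRECT_BP_SIZE */
	uint64_t l2fpm_size = 5242881;	/* just past 2.5 backing pages */
	uint32_t sd_cnt = (l2fpm_size + bp_size - 1) / bp_size;

	printf("sd_cnt = %u\n", (unsigned)sd_cnt);	/* 3 */
	return 0;
}
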
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for pd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by pd after this function
+ * returns.
+ **/
+static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in sd table (for direct address mode) invalid.
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ * cleared) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This is to cover for cases where you may not want to have an SD with
+ * the full 2M memory but something smaller. By not filling out any
+ * size, the function will default the SD size to be 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1), true);
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ i40e_remove_pd_bp(
+ hw,
+ info->hmc_info,
+ i,
+ true);
+ }
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
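
Inside the per-SD loop, the global PD range [pd_idx, pd_lmt) is clamped with max/min to the 512 PDs owned by SD j before any backing pages are added or rolled back. A small standalone illustration of the clamp (values chosen to straddle an SD boundary):

#include <stdio.h>

#define MAX_BP_COUNT 512u	/* I40E_HMC_MAX_BP_COUNT */

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int pd_idx = 300, pd_lmt = 900;	/* spans SDs 0 and 1 */
	unsigned int j;

	for (j = 0; j < 2; j++) {
		unsigned int lo = umax(pd_idx, j * MAX_BP_COUNT);
		unsigned int hi = umin(pd_lmt, (j + 1) * MAX_BP_COUNT);

		printf("sd %u handles pds [%u,%u)\n", j, lo, hi);
	}
	return 0;	/* prints [300,512) and [512,900) */
}
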
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ i40e_status ret_code = 0;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code) &&
+ (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code)
+ goto configure_lan_hmc_out;
+ /* on success, fall through to the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * bookkeeping information about PDs and backing storage.
+ **/
+static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
+ j, true);
+ if (ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ i40e_status ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
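
Taken together, a PF drives the LAN HMC in a fixed order: size it once at init, program the SD/PD layout, then tear everything down on remove. A hedged sketch of that call order using only functions declared in this patch (the queue counts are placeholders and error handling is abbreviated):

/* sketch of the expected lifecycle, not a complete probe/remove path */
static int example_hmc_lifecycle(struct i40e_hw *hw)
{
	i40e_status err;

	/* once per PF init: sizes hmc_obj[] and the SD table */
	err = i40e_init_lan_hmc(hw, 64, 64, 0, 0);	/* placeholder counts */
	if (err)
		return -EIO;

	/* program the backing store before any queue contexts are touched */
	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
	if (err)
		return -EIO;

	/* ... i40e_set/clear_lan_*_queue_context() while the device runs ... */

	/* on remove: frees backing pages and bookkeeping memory */
	i40e_shutdown_lan_hmc(hw);
	return 0;
}
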
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { 0 }
+};
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+
+ return 0;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array to be filled
+ * @ce_info: layout description of the source struct
+ * @dest: the struct whose fields are packed into the context
+ **/
+static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 shift_width;
+ u64 bitfield;
+ u8 hi_byte;
+ u8 hi_mask;
+ u64 t_bits;
+ u64 mask;
+ u8 *p;
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ /* clear out the field */
+ bitfield = 0;
+
+ /* copy from the next struct field */
+ p = dest + ce_info[f].offset;
+ switch (ce_info[f].size_of) {
+ case 1:
+ bitfield = *p;
+ break;
+ case 2:
+ bitfield = cpu_to_le16(*(u16 *)p);
+ break;
+ case 4:
+ bitfield = cpu_to_le32(*(u32 *)p);
+ break;
+ case 8:
+ bitfield = cpu_to_le64(*(u64 *)p);
+ break;
+ }
+
+ /* prepare the bits and mask */
+ shift_width = ce_info[f].lsb % 8;
+ mask = ((u64)1 << ce_info[f].width) - 1;
+
+ /* save upper bytes for special case */
+ hi_mask = (u8)((mask >> 56) & 0xff);
+ hi_byte = (u8)((bitfield >> 56) & 0xff);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ bitfield <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ p = context_bytes + (ce_info[f].lsb / 8);
+ memcpy(&t_bits, p, sizeof(u64));
+
+ t_bits &= ~mask; /* get the bits not changing */
+ t_bits |= bitfield; /* add in the new bits */
+
+ /* put it all back */
+ memcpy(p, &t_bits, sizeof(u64));
+
+ /* deal with the special case if needed
+ * example: 62 bit field that starts in bit 5 of first byte
+ * will overlap 3 bits into byte 9
+ */
+ if ((shift_width + ce_info[f].width) > 64) {
+ u8 byte;
+
+ hi_mask >>= (8 - shift_width);
+ hi_byte >>= (8 - shift_width);
+ byte = p[8] & ~hi_mask; /* get the bits not changing */
+ byte |= hi_byte; /* add in the new bits */
+ p[8] = byte; /* put it back */
+ }
+ }
+
+ return 0;
+}
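
The packing loop is byte-oriented: a field is shifted to its in-byte alignment (lsb % 8) and merged into the eight bytes starting at lsb / 8, with a ninth byte patched separately when shift + width spills past 64 bits. A standalone rework of the common case, packing a 13-bit qlen at bit 161 as in the Tx context table above (it assumes a little-endian host, as the driver's raw memcpy of u64s does, and omits the ninth-byte spill):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_field(uint8_t *ctx, uint64_t val, uint16_t width, uint16_t lsb)
{
	uint16_t shift = lsb % 8;
	uint64_t mask = (((uint64_t)1 << width) - 1) << shift;
	uint64_t t;

	memcpy(&t, ctx + lsb / 8, sizeof(t));	/* read-modify-write 8 bytes */
	t = (t & ~mask) | ((val << shift) & mask);
	memcpy(ctx + lsb / 8, &t, sizeof(t));
}

int main(void)
{
	uint8_t ctx[128] = {0};
	int i;

	pack_field(ctx, 0x1000, 13, 33 + 128);	/* qlen, line 1 of Tx context */
	for (i = 20; i < 23; i++)
		printf("byte %d = 0x%02x\n", i, ctx[i]);
	return 0;
}
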
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer to be filled with the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
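
Resolving an object's virtual address is two divisions: the FPM byte offset selects either the 4K page (paged SDs) or falls inside the 2M page (direct SDs), and the remainder is the offset within that page. A standalone rework of the offset math with sample numbers (a 32-byte object at a hypothetical base of 0x40000):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x40000, size = 32;	/* hypothetical object layout */
	uint32_t obj_idx = 200;

	uint64_t fpm = base + size * obj_idx;	/* 0x41900 */

	/* paged SD: which 4K page, and the offset inside it */
	uint32_t pd_idx = fpm / 4096, in_pd = fpm % 4096;
	/* direct SD: offset inside the 2M backing page */
	uint32_t in_sd = fpm % 0x200000;

	printf("pd %u + 0x%x, or sd offset 0x%x\n", (unsigned)pd_idx,
	       (unsigned)in_pd, (unsigned)in_sd);
	return 0;
}
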
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 00000000000..00ff3500607
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 00000000000..601d482694e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7375 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Local includes */
+#include "i40e.h"
+
+const char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+ "Intel(R) Ethernet Connection XL710 Network Driver";
+
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 9
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." \
+ __stringify(DRV_VERSION_BUILD) DRV_KERN
+const char i40e_driver_version_str[] = DRV_VERSION;
+static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+
+/* a bit of forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+ mem->va = NULL;
+ mem->pa = 0;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size)
+{
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+ mem->va = NULL;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ *
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same size lump requests.
+ * Linear search time and any fragmentation should be minimal.
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+{
+ int ret = -ENOMEM;
+ int i = 0;
+ int j = 0;
+
+ if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+ dev_info(&pf->pdev->dev,
+ "param err: pile=%p needed=%d id=0x%04x\n",
+ pile, needed, id);
+ return -EINVAL;
+ }
+
+ /* start the linear search with an imperfect hint */
+ i = pile->search_hint;
+ while (i < pile->num_entries && ret < 0) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ i++;
+ continue;
+ }
+
+ /* do we have enough in this lump? */
+ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+ if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+ break;
+ }
+
+ if (j == needed) {
+ /* there was enough, so assign it to the requestor */
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+ pile->search_hint = i + j;
+ } else {
+ /* not enough, so skip over it and continue looking */
+ i += j;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+ int i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+
+ for (i = index;
+ i < pile->num_entries && pile->list[i] == valid_id;
+ i++) {
+ pile->list[i] = 0;
+ count++;
+ }
+
+ if (count && index < pile->search_hint)
+ pile->search_hint = index;
+
+ return count;
+}
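
The pile is a flat array: a free slot is zero and an allocated slot carries the owner id plus a high valid bit, so i40e_get_lump only has to scan for `needed` consecutive free slots and i40e_put_lump clears slots while they match the owner. A compact standalone replica of both operations (the valid bit and pile size here are illustrative, not the driver's values):

#include <stdint.h>
#include <stdio.h>

#define VALID_BIT 0x8000u	/* stands in for I40E_PILE_VALID_BIT */
#define ENTRIES   16u

static uint16_t list[ENTRIES];	/* 0 == free */

static int get_lump(uint16_t needed, uint16_t id)
{
	uint16_t i = 0, j;

	while (i + needed <= ENTRIES) {
		for (j = 0; j < needed && !(list[i + j] & VALID_BIT); j++)
			;
		if (j == needed) {	/* found a big enough free run */
			for (j = 0; j < needed; j++)
				list[i + j] = id | VALID_BIT;
			return i;
		}
		i += j + 1;	/* skip past the allocated blocker */
	}
	return -1;
}

static int put_lump(uint16_t index, uint16_t id)
{
	int count = 0;

	while (index + count < ENTRIES &&
	       list[index + count] == (id | VALID_BIT))
		list[index + count++] = 0;
	return count;
}

int main(void)
{
	int a = get_lump(4, 1);
	int b = get_lump(4, 2);

	printf("a=%d b=%d freed=%d\n", a, b, put_lump(a, 1));	/* 0 4 4 */
	return 0;
}
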
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+static void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+ if (!test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
+ !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ schedule_work(&pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
+ **/
+static void i40e_tx_timeout(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ pf->tx_timeout_count++;
+
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+ pf->tx_timeout_recovery_level = 0;
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d\n",
+ pf->tx_timeout_recovery_level);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 0:
+ /* disable and re-enable queues for the VSI */
+ if (in_interrupt()) {
+ set_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ } else {
+ i40e_vsi_reinit_locked(vsi);
+ }
+ break;
+ case 1:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ case 2:
+ set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ break;
+ case 3:
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ i40e_down(vsi);
+ break;
+ }
+ i40e_service_event_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+ return &vsi->net_stats;
+}
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+ struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ *storage = *i40e_get_vsi_stats_struct(vsi);
+
+ return storage;
+}
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+ struct rtnl_link_stats64 *ns;
+ int i;
+
+ if (!vsi)
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ memset(ns, 0, sizeof(*ns));
+ memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+ memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+ memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+ if (vsi->rx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memset(&vsi->rx_rings[i].rx_stats, 0,
+ sizeof(vsi->rx_rings[i].rx_stats));
+ memset(&vsi->tx_rings[i].tx_stats, 0,
+ sizeof(vsi->tx_rings[i].tx_stats));
+ }
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+ memset(&pf->stats, 0, sizeof(pf->stats));
+ memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero. In the process, we also manage
+ * the potential roll-over.
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
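+ /* the emulated device presumably cannot do a 64-bit register
+ * read, so build the 48-bit value from two 32-bit reads
+ */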
+ if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ } else {
+ new_data = rd64(hw, loreg);
+ }
+ if (!offset_loaded)
+ *offset = new_data;
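+ /* the hardware counter is 48 bits wide; if it wrapped since the
+ * offset was taken, re-add 2^48 before subtracting the offset
+ */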
+ if (likely(new_data >= *offset))
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
+
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+ int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+static void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ int idx = 0;
+
+ idx = veb->stats_idx;
+ es = &veb->stats;
+ oes = &veb->stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_discards, &es->tx_discards);
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+ i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ veb->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
+ **/
+static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u64 xoff = 0;
+ u16 i, v;
+
+ if ((hw->fc.current_mode != I40E_FC_FULL) &&
+ (hw->fc.current_mode != I40E_FC_RX_PAUSE))
+ return;
+
+ xoff = nsd->link_xoff_rx;
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+
+ /* No new LFC xoff rx */
+ if (!(nsd->link_xoff_rx - xoff))
+ return;
+
+ /* Pause frames legitimately stall Tx, so clear the
+ * __I40E_HANG_CHECK_ARMED bit for all Tx rings rather than
+ * report a false Tx hang
+ */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *ring = &vsi->tx_rings[i];
+ clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in PFC mode
+ **/
+static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u16 i, v;
+ u8 tc;
+
+ dcb_cfg = &hw->local_dcbx_config;
+
+ /* See if DCB enabled with PFC TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
+ !(dcb_cfg->pfc.pfcenable)) {
+ i40e_update_link_xoff_rx(pf);
+ return;
+ }
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ u64 prio_xoff = nsd->priority_xoff_rx[i];
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_rx[i],
+ &nsd->priority_xoff_rx[i]);
+
+ /* No new PFC xoff rx */
+ if (!(nsd->priority_xoff_rx[i] - prio_xoff))
+ continue;
+ /* Get the TC for given priority */
+ tc = dcb_cfg->etscfg.prioritytable[i];
+ xoff[tc] = true;
+ }
+
+ /* Clear the __I40E_HANG_CHECK_ARMED bit for the Tx rings of any
+ * TC that saw new XOFFs; those queues are paused, not hung
+ */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *ring = &vsi->tx_rings[i];
+
+ tc = ring->dcb_tc;
+ if (xoff[tc])
+ clear_bit(__I40E_HANG_CHECK_ARMED,
+ &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_stats - Update the board statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs. This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications. We sort it all out here in a central place.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct rtnl_link_stats64 *ons;
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ u32 tx_restart, tx_busy;
+ u32 rx_page, rx_buf;
+ u64 rx_p, rx_b;
+ u64 tx_p, tx_b;
+ int i;
+ u16 q;
+
+ if (test_bit(__I40E_DOWN, &vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ ons = &vsi->net_stats_offsets;
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the netdev and vsi stats that the driver collects
+ * on the fly during packet processing
+ */
+ rx_b = rx_p = 0;
+ tx_b = tx_p = 0;
+ tx_restart = tx_busy = 0;
+ rx_page = 0;
+ rx_buf = 0;
+ for (q = 0; q < vsi->num_queue_pairs; q++) {
+ struct i40e_ring *p;
+
+ p = &vsi->rx_rings[q];
+ rx_b += p->rx_stats.bytes;
+ rx_p += p->rx_stats.packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
+
+ p = &vsi->tx_rings[q];
+ tx_b += p->tx_stats.bytes;
+ tx_p += p->tx_stats.packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ }
+ vsi->tx_restart = tx_restart;
+ vsi->tx_busy = tx_busy;
+ vsi->rx_page_failed = rx_page;
+ vsi->rx_buf_failed = rx_buf;
+
+ ns->rx_packets = rx_p;
+ ns->rx_bytes = rx_b;
+ ns->tx_packets = tx_p;
+ ns->tx_bytes = tx_b;
+
+ i40e_update_eth_stats(vsi);
+ /* update netdev stats from eth stats */
+ ons->rx_errors = oes->rx_errors;
+ ns->rx_errors = es->rx_errors;
+ ons->tx_errors = oes->tx_errors;
+ ns->tx_errors = es->tx_errors;
+ ons->multicast = oes->rx_multicast;
+ ns->multicast = es->rx_multicast;
+ ons->tx_dropped = oes->tx_discards;
+ ns->tx_dropped = es->tx_discards;
+
+ /* Get the port data only if this is the main PF VSI */
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ns->rx_crc_errors = nsd->crc_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+ ns->rx_errors = nsd->crc_errors
+ + nsd->illegal_bytes;
+
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+ ns->rx_length_errors = nsd->rx_length_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
+ }
+
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ }
+
+ pf->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (vlan == f->vlan) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * MAC address was not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+
+ /* A vlan of -1 on every filter denotes not being in vlan mode,
+ * so we have to walk the whole list to be sure
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ * @is_vf: true if it is a vf
+ * @is_netdev: true if it is a netdev
+ *
+ * Goes through all the macvlan filters and adds a
+ * macvlan filter for each unique vlan that already exists
+ *
+ * Returns first filter found on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (!i40e_find_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev)) {
+ if (!i40e_add_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev))
+ return NULL;
+ }
+ }
+
+ return list_first_entry_or_null(&vsi->mac_filter_list,
+ struct i40e_mac_filter, list);
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f) {
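+ /* this may run in atomic context (e.g. via ndo_set_rx_mode),
+ * hence GFP_ATOMIC
+ */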
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ goto add_filter_out;
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+ f->vlan = vlan;
+ f->changed = true;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &vsi->mac_filter_list);
+ }
+
+ /* increment counter and add a new flag if needed */
+ if (is_vf) {
+ if (!f->is_vf) {
+ f->is_vf = true;
+ f->counter++;
+ }
+ } else if (is_netdev) {
+ if (!f->is_netdev) {
+ f->is_netdev = true;
+ f->counter++;
+ }
+ } else {
+ f->counter++;
+ }
+
+ /* changed tells sync_filters_subtask to
+ * push the filter down to the firmware
+ */
+ if (f->changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+add_filter_out:
+ return f;
+}
+
+/**
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f || f->counter == 0)
+ return;
+
+ if (is_vf) {
+ if (f->is_vf) {
+ f->is_vf = false;
+ f->counter--;
+ }
+ } else if (is_netdev) {
+ if (f->is_netdev) {
+ f->is_netdev = false;
+ f->counter--;
+ }
+ } else {
+ /* make sure we don't remove a filter in use by vf or netdev */
+ int min_f = 0;
+ min_f += (f->is_vf ? 1 : 0);
+ min_f += (f->is_netdev ? 1 : 0);
+
+ if (f->counter > min_f)
+ f->counter--;
+ }
+
+ /* counter == 0 tells sync_filters_subtask to
+ * remove the filter from the firmware's list
+ */
+ if (f->counter == 0) {
+ f->changed = true;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct sockaddr *addr = p;
+ struct i40e_mac_filter *f;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ i40e_status ret;
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ addr->sa_data, NULL);
+ if (ret) {
+ netdev_info(netdev,
+ "Addr change for Main VSI failed: %d\n",
+ ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ }
+
+ /* In order to be sure to not drop any packets, add the new address
+ * then delete the old one.
+ */
+ f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
+ if (!f)
+ return -ENOMEM;
+
+ i40e_sync_vsi_filters(vsi);
+ i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
+ i40e_sync_vsi_filters(vsi);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc,
+ bool is_add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u16 sections = 0;
+ u8 netdev_tc = 0;
+ u16 numtc = 0;
+ u16 qcount;
+ u8 offset;
+ u16 qmap;
+ int i;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ offset = 0;
+
+ if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Find numtc from enabled TC bitmap */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i)) /* TC is enabled */
+ numtc++;
+ }
+ if (!numtc) {
+ dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+ numtc = 1;
+ }
+ } else {
+ /* At least TC0 is enabled in case of non-DCB case */
+ numtc = 1;
+ }
+
+ vsi->tc_config.numtc = numtc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ int pow, num_qps;
+
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (i == 0)
+ qcount = pf->rss_size;
+ else
+ qcount = pf->num_tc_qps;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ break;
+ case I40E_VSI_FDIR:
+ case I40E_VSI_SRIOV:
+ case I40E_VSI_VMDQ2:
+ default:
+ qcount = vsi->alloc_queue_pairs;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ WARN_ON(i != 0);
+ break;
+ }
+
+ /* find the power-of-2 of the number of queue pairs */
+ num_qps = vsi->tc_config.tc_info[i].qcount;
+ pow = 0;
+ while (num_qps &&
+ ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ pow++;
+ num_qps >>= 1;
+ }
+
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
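+ /* qmap packs this TC's first queue index together with the
+ * log2 of its queue count (rounded up)
+ */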
+ qmap =
+ (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ offset += vsi->tc_config.tc_info[i].qcount;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+ qmap = 0;
+ }
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_queue_pairs = offset;
+
+ /* Scheduler section valid can only be set for ADD VSI */
+ if (is_add) {
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ ctxt->info.up_enable_bits = enabled_tc;
+ }
+ if (vsi->type == I40E_VSI_SRIOV) {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ ctxt->info.queue_mapping[i] =
+ cpu_to_le16(vsi->base_queue + i);
+ } else {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ }
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40e_set_rx_mode(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+ struct netdev_hw_addr *ha;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (!i40e_find_mac(vsi, uca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, uca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (!i40e_find_mac(vsi, mca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, mca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ bool found = false;
+
+ if (!f->is_netdev)
+ continue;
+
+ if (is_multicast_ether_addr(f->macaddr)) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+
+ for_each_dev_addr(netdev, ha) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ i40e_del_filter(
+ vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+ }
+
+ /* check for other flag changes */
+ if (vsi->current_netdev_flags != vsi->netdev->flags) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ bool promisc_forced_on = false;
+ bool add_happened = false;
+ int filter_list_len = 0;
+ u32 changed_flags = 0;
+ i40e_status ret = 0;
+ struct i40e_pf *pf;
+ int num_add = 0;
+ int num_del = 0;
+ u16 cmd_flags;
+
+ /* typed pointers for the element arrays, allocated with kcalloc below */
+ struct i40e_aqc_add_macvlan_element_data *add_list;
+ struct i40e_aqc_remove_macvlan_element_data *del_list;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ usleep_range(1000, 2000);
+ pf = vsi->back;
+
+ if (vsi->netdev) {
+ changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+ vsi->current_netdev_flags = vsi->netdev->flags;
+ }
+
+ if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+ vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
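+ /* each AdminQ command can carry at most one ASQ buffer's worth
+ * of elements, so batch the filter list into chunks of that size
+ */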
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+ if (!del_list) {
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter != 0)
+ continue;
+ f->changed = false;
+ cmd_flags = 0;
+
+ /* add to delete list */
+ memcpy(del_list[num_del].mac_addr,
+ f->macaddr, ETH_ALEN);
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan ==
+ I40E_VLAN_ANY ? 0 : f->vlan));
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY ||
+ (vsi->netdev && !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ del_list[num_del].flags = cmd_flags;
+ num_del++;
+
+ /* unlink from filter list */
+ list_del(&f->list);
+ kfree(f);
+
+ /* flush a full buffer */
+ if (num_del == filter_list_len) {
+ ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ num_del = 0;
+ memset(del_list, 0, filter_list_len * sizeof(*del_list));
+
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
+ ret,
+ pf->hw.aq.asq_last_status);
+ }
+ }
+ if (num_del) {
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ del_list, num_del, NULL);
+ num_del = 0;
+
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+
+ kfree(del_list);
+ del_list = NULL;
+
+ /* do all the adds now */
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_add_macvlan_element_data),
+ GFP_KERNEL);
+ if (!add_list) {
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+ add_happened = true;
+ cmd_flags = 0;
+
+ /* add to add array */
+ memcpy(add_list[num_add].mac_addr,
+ f->macaddr, ETH_ALEN);
+ add_list[num_add].vlan_tag =
+ cpu_to_le16(
+ (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ add_list[num_add].queue_number = 0;
+
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
+ !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ add_list[num_add].flags = cpu_to_le16(cmd_flags);
+ num_add++;
+
+ /* flush a full buffer */
+ if (num_add == filter_list_len) {
+ ret = i40e_aq_add_macvlan(&pf->hw,
+ vsi->seid,
+ add_list,
+ num_add,
+ NULL);
+ num_add = 0;
+
+ if (ret)
+ break;
+ memset(add_list, 0, filter_list_len * sizeof(*add_list));
+ }
+ }
+ if (num_add) {
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ num_add = 0;
+ }
+ kfree(add_list);
+ add_list = NULL;
+
+ if (add_happened && (!ret)) {
+ /* do nothing */;
+ } else if (add_happened && (ret)) {
+ dev_info(&pf->pdev->dev,
+ "add filter failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+ !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ promisc_forced_on = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+ }
+ }
+ }
+
+ /* check for changes in promiscuous modes */
+ if (changed_flags & IFF_ALLMULTI) {
+ bool cur_multipromisc;
+ cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+ ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set multi promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ bool cur_promisc;
+ cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+ test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state));
+ ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc,
+ NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set uni promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return 0;
+}
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+ int v;
+
+ if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+ return;
+ pf->flags &= ~I40E_FLAG_FILTER_SYNC;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] &&
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
+ i40e_sync_vsi_filters(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ struct i40e_vsi *vsi = np->vsi;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ netdev_info(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ i40e_vsi_reinit_locked(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+ return; /* already enabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK))
+ return; /* already disabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @features: netdev features to test if VLAN offload is enabled or not
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct i40e_mac_filter *f, *add_f;
+ bool is_netdev, is_vf;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(vsi->netdev);
+
+ if (is_netdev) {
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not sync filters for vid %d\n", vid);
+ return ret;
+ }
+
+ /* Now that we've added a vlan tag, check whether the "accept
+ * any" filters (vlan == -1) still exist and, if so, replace the
+ * -1 "tag" with 0, so we accept untagged plus the explicitly
+ * tagged traffic (and not any-tagged and untagged)
+ */
+ if (vid > 0) {
+ if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+ ret = i40e_sync_vsi_filters(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_mac_filter *f, *add_f;
+ bool is_vf, is_netdev;
+ int filter_count = 0;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(netdev);
+
+ if (is_netdev)
+ i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
+ return ret;
+ }
+
+ /* go through all the filters for this VSI and if only the
+ * vid == 0 filters remain, it means no other vlans are set, so
+ * vid 0 must be replaced with -1. This signifies that we should
+ * from now on accept any traffic (with any tag present, or
+ * untagged)
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (is_netdev) {
+ if (f->vlan &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ filter_count++;
+ }
+
+ if (f->vlan)
+ filter_count++;
+ }
+
+ if (!filter_count && is_netdev) {
+ i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
+ f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ if (!filter_count) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return i40e_sync_vsi_filters(vsi);
+}
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be added
+ **/
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ int ret;
+
+ if (vid > 4095)
+ return 0;
+
+ netdev_info(vsi->netdev, "adding %pM vid=%d\n",
+ netdev->dev_addr, vid);
+ /* If the network stack called us with vid = 0, we should
+ * indicate to i40e_vsi_add_vlan() that we want to receive
+ * any traffic (i.e. with any vlan tag, or untagged)
+ */
+ ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+
+ if (!ret) {
+ if (vid < VLAN_N_VID)
+ set_bit(vid, vsi->active_vlans);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be removed
+ **/
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ netdev_info(vsi->netdev, "removing %pM vid=%d\n",
+ netdev->dev_addr, vid);
+ /* the return code is ignored as there is nothing a user can do
+ * about a failure to remove, and a log message has already been
+ * printed by the called function
+ */
+ i40e_vsi_kill_vlan(vsi, vid);
+
+ clear_bit(vid, vsi->active_vlans);
+ return 0;
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+ u16 vid;
+
+ if (!vsi->netdev)
+ return;
+
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+ i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = cpu_to_le16(vid);
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Just use the vlan_rx_register() service to put it back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+ vsi->info.pvid = 0;
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+}
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i].desc)
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->rx_rings[i].desc)
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+}
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ i40e_status err = 0;
+ u32 qtx_ctl = 0;
+
+ /* some ATR related tx ring init */
+ if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ ring->atr_count = 0;
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ /* initialize XPS */
+ if (ring->q_vector && ring->netdev &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->affinity_mask,
+ ring->queue_index);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+ tx_ctx.new_context = 1;
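+ /* the queue context takes the ring base address in 128-byte units */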
+ tx_ctx.base = (ring->dma / 128);
+ tx_ctx.qlen = ring->count;
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED));
+
+ /* As part of VSI creation/update, FW allocates certain
+ * Tx arbitration queue sets for each TC enabled for
+ * the VSI. The FW returns the handles to these queue
+ * sets as part of the response buffer to Add VSI,
+ * Update VSI, etc. AQ commands. It is expected that
+ * these queue set handles be associated with the Tx
+ * queues by the driver as part of the TX queue context
+ * initialization. This has to be done regardless of
+ * DCB as by default everything is mapped to TC0.
+ */
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ i40e_flush(hw);
+
+ clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+
+ /* cache the tail offset for easier writes later */
+ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ i40e_status err = 0;
+
+ ring->state = 0;
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+ ring->rx_hdr_len = vsi->rx_hdr_len;
+
+ rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = (ring->dma / 128);
+ rx_ctx.qlen = ring->count;
+
+ if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
+ set_ring_16byte_desc_enabled(ring);
+ rx_ctx.dsize = 0;
+ } else {
+ rx_ctx.dsize = 1;
+ }
+
+ rx_ctx.dtype = vsi->dtype;
+ if (vsi->dtype) {
+ set_ring_ps_enabled(ring);
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ } else {
+ rx_ctx.hsplit_0 = 0;
+ }
+
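+ /* cap the max frame size at what a full buffer chain can hold */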
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame,
+ (chain_len * ring->rx_buf_len));
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* cache tail for quicker writes, and clear the reg before use */
+ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
+ err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
+ vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
+ + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = I40E_RXBUFFER_2048;
+
+ /* figure out correct receive buffer length */
+ switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED)) {
+ case I40E_FLAG_RX_1BUF_ENABLED:
+ vsi->rx_hdr_len = 0;
+ vsi->rx_buf_len = vsi->max_frame;
+ vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+ break;
+ case I40E_FLAG_RX_PS_ENABLED:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+ break;
+ default:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
+ break;
+ }
+
+ /* round up for the chip's needs */
+ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+ if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ continue;
+
+ qoffset = vsi->tc_config.tc_info[n].qoffset;
+ qcount = vsi->tc_config.tc_info[n].qcount;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ rx_ring->dcb_tc = n;
+ tx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+ if (vsi->netdev)
+ i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+ int err;
+
+ i40e_set_vsi_rx_mode(vsi);
+ i40e_restore_vlan(vsi);
+ i40e_vsi_config_dcb_rings(vsi);
+ err = i40e_vsi_configure_tx(vsi);
+ if (!err)
+ err = i40e_vsi_configure_rx(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_q_vector *q_vector;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i, q;
+ u32 val;
+ u32 qp;
+
+ /* The interrupt indexing is offset by 1 in the PFINT_ITRn
+ * and PFINT_LNKLSTn registers, e.g.:
+ * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
+ */
+ qp = vsi->base_queue;
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+ q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+ q_vector->tx.itr);
+
+ /* Linked list for the queuepairs assigned to this vector */
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
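+		/* Chain the causes for this vector: each Rx entry's
+		 * NEXTQ points at the Tx queue of the same pair, and
+		 * each Tx entry's NEXTQ points at the Rx queue of the
+		 * following pair, until the last Tx entry terminates
+		 * the list.
+		 */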
+ for (q = 0; q < q_vector->num_ringpairs; q++) {
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_RX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ /* Terminate the linked list */
+ if (q == (q_vector->num_ringpairs - 1))
+ val |= (I40E_QUEUE_END_OF_LIST
+ << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp++;
+ }
+ }
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @hw: ptr to the hardware info
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+{
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+ I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+
+ /* OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ /* set the ITR configuration */
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(0), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(0), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
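+	/* the register index is vector - 1 because vector 0 (the
+	 * misc/ICR0 interrupt) is driven through PFINT_DYN_CTL0
+	 * rather than the PFINT_DYN_CTLN array
+	 */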
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ pr_info("fdir ring cleaning needed\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Allocates MSI-X vectors and requests interrupts from the kernel.
+ **/
+static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+ int q_vectors = vsi->num_q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ int vector, err;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+
+ if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(pf->msix_entries[base + vector].vector,
+ vsi->irq_handler,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ &q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
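+	/* unwind in reverse order, clearing the affinity hint and
+	 * freeing the IRQ for each vector handled before the failure
+	 */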
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[base + vector].vector,
+ &(vsi->q_vectors[vector]));
+ }
+ return err;
+}
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ }
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+ i40e_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ i40e_irq_dynamic_enable(vsi, i);
+ } else {
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_stop_misc_vector(struct i40e_pf *pf)
+{
+ /* Disable ICR 0 */
+ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+ i40e_flush(&pf->hw);
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts. This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+ struct i40e_hw *hw = &pf->hw;
+ u32 icr0, icr0_remaining;
+ u32 val, ena_mask;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
+ val = rd32(hw, I40E_PFINT_DYN_CTL0);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+
+ /* temporarily disable queue cause for NAPI processing */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+ val = rd32(hw, I40E_GLGEN_RSTAT);
+ val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ if (val & I40E_RESET_CORER)
+ pf->corer_count++;
+ else if (val & I40E_RESET_GLOBR)
+ pf->globr_count++;
+ else if (val & I40E_RESET_EMPR)
+ pf->empr_count++;
+ }
+
+ /* If a critical error is pending we have no choice but to reset the
+ * device.
+ * Report and mask out any remaining unexpected interrupts.
+ */
+ icr0_remaining = icr0 & ena_mask;
+ if (icr0_remaining) {
+ dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+ icr0_remaining);
+ if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ } else {
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ }
+ }
+ ena_mask &= ~icr0_remaining;
+ }
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+ i40e_flush(hw);
+ if (!test_bit(__I40E_DOWN, &pf->state)) {
+ i40e_service_event_schedule(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * map_vector_to_rxq - Assigns the Rx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @r_idx: rx queue index
+ **/
+static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
+
+ rx_ring->q_vector = q_vector;
+ q_vector->rx.ring[q_vector->rx.count] = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * map_vector_to_txq - Assigns the Tx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @t_idx: tx queue index
+ **/
+static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+
+ tx_ring->q_vector = q_vector;
+ q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+ int qp_remaining = vsi->num_queue_pairs;
+ int q_vectors = vsi->num_q_vectors;
+ int qp_per_vector;
+ int v_start = 0;
+ int qp_idx = 0;
+
+ /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ */
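+	/* e.g. 5 queue pairs on 2 vectors: DIV_ROUND_UP(5, 2) = 3
+	 * pairs land on vector 0, then DIV_ROUND_UP(2, 1) = 2 pairs
+	 * land on vector 1
+	 */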
+ for (; v_start < q_vectors && qp_remaining; v_start++) {
+ qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+ for (; qp_per_vector;
+ qp_per_vector--, qp_idx++, qp_remaining--) {
+ map_vector_to_rxq(vsi, v_start, qp_idx);
+ map_vector_to_txq(vsi, v_start, qp_idx);
+ }
+ }
+}
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ err = i40e_vsi_request_irq_msix(vsi, basename);
+ else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ err = request_irq(pf->pdev->irq, i40e_intr, 0,
+ pf->misc_int_name, pf);
+ else
+ err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+ pf->misc_int_name, pf);
+
+ if (err)
+ dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40e_netpoll(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
+ pf->flags |= I40E_FLAG_IN_NETPOLL;
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ } else {
+ i40e_intr(pf->pdev->irq, netdev);
+ }
+ pf->flags &= ~I40E_FLAG_IN_NETPOLL;
+}
+#endif
+
+/**
+ * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 tx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
+ ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
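+		/* the loop above waits until the QENA_REQ and QENA_STAT
+		 * bits agree, i.e. any previously requested enable or
+		 * disable has completed, before issuing a new request
+		 */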
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already enabled\n", i);
+ continue;
+ }
+ } else {
+ /* is !STAT set ? */
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already disabled\n", i);
+ continue;
+ }
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ else
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (enable) {
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 rx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
+ ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ } else {
+ /* is !STAT set ? */
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ else
+ rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK);
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+
+ if (enable) {
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @request: true to start the rings, false to stop them
+ **/
+static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+{
+ int ret;
+
+ /* do rx first for enable and last for disable */
+ if (request) {
+ ret = i40e_vsi_control_rx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, request);
+ } else {
+ ret = i40e_vsi_control_tx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_rx(vsi, request);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u32 val, qp;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (!vsi->q_vectors)
+ return;
+
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+
+ /* free only the irqs that were actually requested */
+ if (vsi->q_vectors[i].num_ringpairs == 0)
+ continue;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(pf->msix_entries[vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[vector].vector,
+ &vsi->q_vectors[i]);
+
+ /* Tear down the interrupt queue link list
+ *
+ * We know that they come in pairs and always
+ * the Rx first, then the Tx. To clear the
+ * link list, stick the EOL value into the
+ * next_q field of the registers.
+ */
+ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
+ while (qp != I40E_QUEUE_END_OF_LIST) {
+ u32 next;
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+ >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp = next;
+ }
+ }
+ } else {
+ free_irq(pf->pdev->irq, pf);
+
+ val = rd32(hw, I40E_PFINT_LNKLST0);
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ }
+}
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
+ struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
+ int r_idx;
+
+ if (!q_vector)
+ continue;
+
+ /* disassociate q_vector from rings */
+ for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+ q_vector->tx.ring[r_idx]->q_vector = NULL;
+ for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+ q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+ }
+ kfree(vsi->q_vectors);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+ /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ pci_disable_msix(pf->pdev);
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pf->pdev);
+ }
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt-specific resources and reset the
+ * structure to pre-load conditions.
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+ int i;
+
+ i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_enable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_disable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
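+	/* flag the VSI so that i40e_unquiesce_vsi() knows it was
+	 * taken down here and must be brought back up
+	 */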
+ set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev)) {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ } else {
+ set_bit(__I40E_DOWN, &vsi->state);
+ i40e_down(vsi);
+ }
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ return;
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else
+ i40e_up(vsi); /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_quiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_unquiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ int num_tc = 0, i;
+
+	/* Scan the ETS Config Priority Table to find the
+	 * traffic class enabled for each priority, and use
+	 * the highest enabled traffic class index to derive
+	 * the number of traffic classes in use
+	 */
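+	/* e.g. if the highest TC referenced in the table is 3, then
+	 * TCs 0-3 are in use and num_tc below ends up as 4
+	 */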
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] > num_tc)
+ num_tc = dcbcfg->etscfg.prioritytable[i];
+ }
+
+ /* Traffic class index starts from zero so
+ * increment to return the actual count
+ */
+ num_tc++;
+
+ return num_tc;
+}
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the given DCBx configuration and return a bitmap of the
+ * traffic classes it enables
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+ u8 enabled_tc = 1;
+ u8 i;
+
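+	/* TC0 is always reported enabled (bit 0 is preset above);
+	 * e.g. num_tc = 3 yields a bitmap of 0x7
+	 */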
+ for (i = 0; i < num_tc; i++)
+ enabled_tc |= 1 << i;
+
+ return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 i, enabled_tc;
+ u8 num_tc = 0;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ /* If DCB is not enabled then always in single TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return 1;
+
+ /* MFP mode return count of enabled TCs for this PF */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ num_tc++;
+ }
+ return num_tc;
+ }
+
+ /* SFP mode will be enabled for all TCs on port */
+ return i40e_dcb_get_num_tc(dcbcfg);
+}
+
+/**
+ * i40e_pf_get_default_tc - Get bitmap for first enabled TC
+ * @pf: PF being queried
+ *
+ * Return a bitmap for first enabled traffic class for this PF.
+ **/
+static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
+{
+ u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ u8 i = 0;
+
+ if (!enabled_tc)
+ return 0x1; /* TC0 */
+
+ /* Find the first enabled TC */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ break;
+ }
+
+ return 1 << i;
+}
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+ /* If DCB is not enabled for this PF then just return default TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return i40e_pf_get_default_tc(pf);
+
+ /* MFP mode will have enabled TCs set by FW */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ return pf->hw.func_caps.enabled_tcmap;
+
+ /* SFP mode we want PF to be enabled for all TCs */
+ return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret;
+ int i;
+
+ /* Get the VSI level BW configuration */
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ /* Get the VSI level BW configuration per TC */
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+ &bw_ets_config,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+ dev_info(&pf->pdev->dev,
+ "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+ bw_config.tc_valid_bits,
+ bw_ets_config.tc_valid_bits);
+ /* Still continuing */
+ }
+
+ vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+ vsi->bw_max_quanta = bw_config.max_bw;
+ tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
+ (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+ vsi->bw_ets_limit_credits[i] =
+ le16_to_cpu(bw_ets_config.credits[i]);
+ /* 3 bits out of 4 for each TC */
+ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+ }
+ return ret;
+}
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
+ u8 enabled_tc,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ int i, ret = 0;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ return ret;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ *
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 netdev_tc = 0;
+ int i;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ if (!netdev)
+ return;
+
+ if (!enabled_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ /* Set up actual enabled TCs on the VSI */
+ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+ return;
+
+ /* set per TC queues for the VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* Only set TC queues for enabled tcs
+ *
+		 * e.g. For a VSI that has TC0 and TC3 enabled, the
+		 * enabled_tc bitmap would be 0x9 (binary 1001); the
+		 * driver will set numtc for the netdev to 2, and the
+		 * netdev layer will reference them as TC 0 and TC 1.
+ */
+ if (vsi->tc_config.enabled_tc & (1 << i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_config.tc_info[i].netdev_tc,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].qoffset);
+ }
+
+ /* Assign UP2TC map for the VSI */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ /* Get the actual TC# for the UP */
+ u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt)
+{
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ vsi->info.mapping_flags = ctxt->info.mapping_flags;
+ memcpy(&vsi->info.queue_mapping,
+ &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+ memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_vsi_context ctxt;
+ int ret = 0;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->tc_config.enabled_tc == enabled_tc)
+ return ret;
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_share[i] = 1;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed configuring TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Update current VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed updating vsi bw info, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the netdev TC setup */
+ i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+ return ret;
+}
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_vsi_configure_msix(vsi);
+ else
+ i40e_configure_msi_and_legacy(vsi);
+
+ /* start rings */
+ err = i40e_vsi_control_rings(vsi, true);
+ if (err)
+ return err;
+
+ clear_bit(__I40E_DOWN, &vsi->state);
+ i40e_napi_enable_all(vsi);
+ i40e_vsi_enable_irq(vsi);
+
+ if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+ (vsi->netdev)) {
+ netif_tx_start_all_queues(vsi->netdev);
+ netif_carrier_on(vsi->netdev);
+ }
+ i40e_service_event_schedule(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+ i40e_down(vsi);
+
+ /* Give a VF some time to respond to the reset. The
+ * two second wait is based upon the watchdog cycle in
+ * the VF driver.
+ */
+ if (vsi->type == I40E_VSI_SRIOV)
+ msleep(2000);
+ i40e_up(vsi);
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+ int i;
+
+ /* It is assumed that the caller of this function
+ * sets the vsi->state __I40E_DOWN bit.
+ */
+ if (vsi->netdev) {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_disable(vsi->netdev);
+ }
+ i40e_vsi_disable_irq(vsi);
+ i40e_vsi_control_rings(vsi, false);
+ i40e_napi_disable_all(vsi);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_clean_tx_ring(&vsi->tx_rings[i]);
+ i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ }
+}
+
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc = 0;
+ int ret = -EINVAL;
+ int i;
+
+ /* Check if DCB enabled to continue */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev, "DCB is not enabled for adapter\n");
+ goto exit;
+ }
+
+ /* Check if MFP enabled */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
+ goto exit;
+ }
+
+ /* Check whether tc count is within enabled limit */
+ if (tc > i40e_pf_get_num_tc(pf)) {
+ netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+ goto exit;
+ }
+
+ /* Generate TC map for number of tc requested */
+ for (i = 0; i < tc; i++)
+ enabled_tc |= (1 << i);
+
+ /* Requesting same TC configuration as already enabled */
+ if (enabled_tc == vsi->tc_config.enabled_tc)
+ return 0;
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ /* Configure VSI for enabled TCs */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+ vsi->seid);
+ goto exit;
+ }
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+
+exit:
+ return ret;
+}
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_open(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__I40E_TESTING, &pf->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate descriptors */
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+
+ if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
+ err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
+ if (err)
+ netdev_info(netdev,
+ "couldn't set broadcast err %d aq_err %d\n",
+ err, pf->hw.aq.asq_last_status);
+ }
+
+ return 0;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ return err;
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+static int i40e_close(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (test_and_set_bit(__I40E_DOWN, &vsi->state))
+ return 0;
+
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+{
+ u32 val;
+
+ WARN_ON(in_interrupt());
+
+ /* do the biggest reset indicated */
+ if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+
+ /* Request a Global Reset
+ *
+		 * This starts the chip's countdown to the actual full
+		 * chip reset event and causes a warning interrupt to be
+		 * sent to all PFs, including the requestor. Our handler
+ * for the warning interrupt will deal with the shutdown
+ * and recovery of the switch setup.
+ */
+ dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+ } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+
+ /* Request a Core Reset
+ *
+ * Same as Global Reset, except does *not* include the MAC/PHY
+ */
+ dev_info(&pf->pdev->dev, "CoreR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_CORER_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
+ } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+
+ /* Request a PF Reset
+ *
+ * Resets only the PF-specific registers
+ *
+ * This goes directly to the tear-down and rebuild of
+ * the switch, since we need to do all the recovery as
+ * for the Core Reset.
+ */
+ dev_info(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf);
+
+ } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ int v;
+
+ /* Find the VSI(s) that requested a re-init */
+ dev_info(&pf->pdev->dev,
+ "VSI reinit requested\n");
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ if (vsi != NULL &&
+ test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ i40e_vsi_reinit_locked(pf->vsi[v]);
+ clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ }
+ }
+
+ /* no further action needed, so return now */
+ return;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "bad reset request 0x%08x\n", reset_flags);
+ return;
+ }
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lan_overflow *data =
+ (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ u32 queue = le32_to_cpu(data->prtdcb_rupto);
+ u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ u16 vf_id;
+
+ dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ __func__, queue, qtx_ctl);
+
+ /* Queue belongs to VF, find the VF and issue VF reset */
+ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+ >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+ vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+ >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
+ }
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+ BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+ if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
+ return;
+
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state))
+ return;
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+ if (!vsi)
+ return;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (!vsi->netdev || !vsi->netdev_registered)
+ break;
+
+ if (link_up) {
+ netif_carrier_on(vsi->netdev);
+ netif_tx_wake_all_queues(vsi->netdev);
+ } else {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+ break;
+
+ case I40E_VSI_SRIOV:
+ break;
+
+ case I40E_VSI_VMDQ2:
+ case I40E_VSI_CTRL:
+ case I40E_VSI_MIRROR:
+ default:
+ /* there is no notification for other VSIs */
+ break;
+ }
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+ struct i40e_pf *pf;
+ int i;
+
+ if (!veb || !veb->pf)
+ return;
+ pf = veb->pf;
+
+ /* depth first... */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+ i40e_veb_link_event(pf->veb[i], link_up);
+
+ /* ... now the local VSIs */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+ i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+ bool new_link, old_link;
+
+ new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+ old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+
+ if (new_link == old_link)
+ return;
+
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+
+ /* Notify the base of the switch tree connected to
+ * the link. Floating VEBs are not notified.
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ else
+ i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+
+ if (pf->vf)
+ i40e_vc_notify_link_state(pf);
+}
+
+/**
+ * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
+ * @pf: board private structure
+ *
+ * Set the per-queue flags to request a check for stuck queues in the irq
+ * clean functions, then force interrupts to be sure the irq clean is called.
+ **/
+static void i40e_check_hang_subtask(struct i40e_pf *pf)
+{
+ int i, v;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* for each VSI/netdev
+ * for each Tx queue
+ * set the check flag
+ * for each q_vector
+ * force an interrupt
+ */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ int armed = 0;
+
+ if (!pf->vsi[v] ||
+ test_bit(__I40E_DOWN, &vsi->state) ||
+ (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ set_check_for_tx_hang(&vsi->tx_rings[i]);
+ if (test_bit(__I40E_HANG_CHECK_ARMED,
+ &vsi->tx_rings[i].state))
+ armed++;
+ }
+
+ if (armed) {
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ } else {
+ u16 vec = vsi->base_vector - 1;
+ u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ for (i = 0; i < vsi->num_q_vectors; i++, vec++)
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(vec), val);
+ }
+ i40e_flush(&vsi->back->hw);
+ }
+ }
+}
+
+/**
+ * i40e_watchdog_subtask - Check and bring link up
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ i40e_update_stats(pf->vsi[i]);
+
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+ u32 reset_flags = 0;
+
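+	/* gather every reset level that has been requested, clearing
+	 * each request bit as we go; i40e_do_reset() acts on the
+	 * biggest one
+	 */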
+ if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ }
+
+	/* If a recovery is already waiting, it takes
+	 * precedence over starting a new reset sequence.
+ */
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
+ i40e_handle_reset_warning(pf);
+ return;
+ }
+
+ /* If we're already down or resetting, just bail */
+ if (reset_flags &&
+ !test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ i40e_do_reset(pf, reset_flags);
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ /* save off old link status information */
+ memcpy(&pf->hw.phy.link_info_old, hw_link_info,
+ sizeof(pf->hw.phy.link_info_old));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
+ hw_link_info->link_info = status->link_info;
+ hw_link_info->an_info = status->an_info;
+ hw_link_info->ext_info = status->ext_info;
+ hw_link_info->lse_enable =
+ le16_to_cpu(status->command_flags) &
+ I40E_AQ_LSE_ENABLE;
+
+ /* process the event */
+ i40e_link_event(pf);
+
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct,
+ * then see if the status changed while processing the
+ * initial event.
+ */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+ struct i40e_arq_event_info event;
+ struct i40e_hw *hw = &pf->hw;
+ u16 pending, i = 0;
+ i40e_status ret;
+ u16 opcode;
+ u32 val;
+
+ if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
+ return;
+
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
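+	/* drain the ARQ, bounded by adminq_work_limit so this
+	 * subtask cannot monopolize the service task
+	 */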
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &pending);
+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ dev_info(&pf->pdev->dev, "No ARQ event found\n");
+ break;
+ } else if (ret) {
+ dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+ break;
+ }
+
+ opcode = le16_to_cpu(event.desc.opcode);
+ switch (opcode) {
+
+ case i40e_aqc_opc_get_link_status:
+ i40e_handle_link_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+ ret = i40e_vc_process_vf_msg(pf,
+ le16_to_cpu(event.desc.retval),
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low),
+ event.msg_buf,
+ event.msg_size);
+ break;
+ case i40e_aqc_opc_lldp_update_mib:
+ dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ i40e_handle_lan_overflow_event(pf, &event);
+ break;
+ default:
+ dev_info(&pf->pdev->dev,
+ "ARQ Error: Unknown event %d received\n",
+ event.desc.opcode);
+ break;
+ }
+ } while (pending && (i++ < pf->adminq_work_limit));
+
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ /* re-enable Admin queue interrupt cause */
+ val = rd32(hw, I40E_PFINT_ICR0_ENA);
+ val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+ i40e_flush(hw);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses to build the next layer of VEBs. We track the connections
+ * through our own index numbers because the SEIDs from the HW could
+ * change across a reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+ struct i40e_vsi *ctl_vsi = NULL;
+ struct i40e_pf *pf = veb->pf;
+ int v, veb_idx;
+ int ret;
+
+ /* build VSI that owns this VEB, temporarily attached to base VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ if (pf->vsi[v] &&
+ pf->vsi[v]->veb_idx == veb->idx &&
+ pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ ctl_vsi = pf->vsi[v];
+ break;
+ }
+ }
+ if (!ctl_vsi) {
+ dev_info(&pf->pdev->dev,
+ "missing owner VSI for veb_idx %d\n", veb->idx);
+ ret = -ENOENT;
+ goto end_reconstitute;
+ }
+ if (ctl_vsi != pf->vsi[pf->lan_vsi])
+ ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of owner VSI failed: %d\n", ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(ctl_vsi);
+
+ /* create the VEB in the switch and move the VSI onto the VEB */
+ ret = i40e_add_veb(veb, ctl_vsi);
+ if (ret)
+ goto end_reconstitute;
+
+ /* create the remaining VSIs attached to this VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ continue;
+
+ if (pf->vsi[v]->veb_idx == veb->idx) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ vsi->uplink_seid = veb->seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of vsi_idx %d failed: %d\n",
+ v, ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(vsi);
+ }
+ }
+
+ /* create any VEBs attached to this VEB - RECURSION */
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+ pf->veb[veb_idx]->uplink_seid = veb->seid;
+ ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+ if (ret)
+ break;
+ }
+ }
+
+end_reconstitute:
+ return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ u16 data_size;
+ int buf_len;
+ int err;
+
+ buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ do {
+ cap_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!cap_buf)
+ return -ENOMEM;
+
+ /* this loads the data into the hw struct for us */
+ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+ &data_size,
+ i40e_aqc_opc_list_func_capabilities,
+ NULL);
+ /* data loaded, buffer no longer needed */
+ kfree(cap_buf);
+
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed: aq=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -ENODEV;
+ }
+ } while (err);
+
+ if (pf->hw.debug_mask & I40E_DEBUG_USER)
+ dev_info(&pf->pdev->dev,
+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+ pf->hw.func_caps.num_msix_vectors,
+ pf->hw.func_caps.num_msix_vectors_vf,
+ pf->hw.func_caps.fd_filters_guaranteed,
+ pf->hw.func_caps.fd_filters_best_effort,
+ pf->hw.func_caps.num_tx_qp,
+ pf->hw.func_caps.num_vsis);
+
+ return 0;
+}
+
+/**
+ * i40e_fdir_setup - initialize the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi;
+ bool new_vsi = false;
+ int err, i;
+
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ return;
+
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+
+ /* find existing or make new FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+ pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
+ return;
+ }
+ new_vsi = true;
+ }
+ WARN_ON(vsi->base_queue != I40E_FDIR_RING);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_configure(vsi);
+ if (!err && new_vsi) {
+ char int_name[IFNAMSIZ + 9];
+ snprintf(int_name, sizeof(int_name), "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ }
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_release(pf->vsi[i]);
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
+ clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ return;
+
+ dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+ i40e_vc_notify_reset(pf);
+
+ /* quiesce the VSIs and their queues that are not already DOWN */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ pf->vsi[v]->seid = 0;
+ }
+
+ i40e_shutdown_adminq(&pf->hw);
+
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_pf_reset(hw);
+ if (ret)
+ dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ pf->pfr_count++;
+
+ if (test_bit(__I40E_DOWN, &pf->state))
+ goto end_core_reset;
+ dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ ret = i40e_init_adminq(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_get_capabilities(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
+ ret);
+ goto end_core_reset;
+ }
+
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ /* do basic switch setup */
+ ret = i40e_setup_pf_switch(pf);
+ if (ret)
+ goto end_core_reset;
+
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so only
+ * need to rebuild the switch model in the HW.
+ *
+ * If there were VEBs but the reconstitution failed, we'll try
+ * to recover minimal use by getting the basic PF VSI working.
+ */
+ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ /* find the one VEB connected to the MAC, and find orphans */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+
+ if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+ pf->veb[v]->uplink_seid == 0) {
+ ret = i40e_reconstitute_veb(pf->veb[v]);
+
+ if (!ret)
+ continue;
+
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ pf->vsi[pf->lan_vsi]->uplink_seid
+ = pf->mac_seid;
+ break;
+ } else if (pf->veb[v]->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
+ }
+ }
+ }
+ }
+
+ if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+ /* no VEB, so rebuild only the Main VSI */
+ ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of Main VSI failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ }
+
+ /* reinit the misc interrupt */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ ret = i40e_setup_misc_vector(pf);
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ i40e_pf_unquiesce_all_vsi(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ dev_info(&pf->pdev->dev, "PF reset done\n");
+
+end_core_reset:
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+}
+
+/**
+ * i40e_handle_mdd_event - handle a Malicious Driver Detection event
+ * @pf: pointer to the pf structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious VFs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ struct i40e_vf *vf;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ return;
+
+ /* find what triggered the MDD event */
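+ /* (GL_MDET_TX/RX pack the offending function, event code and
+ * queue number into one register; judging by the 0xffffffff
+ * writes below, the VALID bit appears to latch the event until
+ * the register is written back to clear it.)
+ */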
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
+ >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
+ >> I40E_GL_MDET_TX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
+ >> I40E_GL_MDET_TX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
+ >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
+ >> I40E_GL_MDET_RX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
+ >> I40E_GL_MDET_RX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ vf = &(pf->vf[i]);
+ reg = rd32(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+ }
+
+ reg = rd32(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+ }
+
+ if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work,
+ struct i40e_pf,
+ service_task);
+ unsigned long start_time = jiffies;
+
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ i40e_check_hang_subtask(pf);
+ i40e_sync_filters_subtask(pf);
+ i40e_clean_adminq_subtask(pf);
+
+ i40e_service_event_complete(pf);
+
+ /* If the tasks have taken longer than one timer cycle or there
+ * is more work to be done, reschedule the service task now
+ * rather than wait for the timer to tick again.
+ */
+ if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @data: pointer to PF struct
+ **/
+static void i40e_service_timer(unsigned long data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ vsi->alloc_queue_pairs = pf->num_lan_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_q_vectors = pf->num_lan_msix;
+ else
+ vsi->num_q_vectors = 1;
+
+ break;
+
+ case I40E_VSI_FDIR:
+ vsi->alloc_queue_pairs = 1;
+ vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = 1;
+ break;
+
+ case I40E_VSI_VMDQ2:
+ vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = pf->num_vmdq_msix;
+ break;
+
+ case I40E_VSI_SRIOV:
+ vsi->alloc_queue_pairs = pf->num_vf_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ break;
+
+ default:
+ WARN_ON(1);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (positive)
+ **/
+static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+ int ret = -ENODEV;
+ struct i40e_vsi *vsi;
+ int vsi_idx;
+ int i;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VSI list may be fragmented if VSI creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free VSIs in the list.
+ *
+ * find next empty vsi slot, looping back around if necessary
+ */
+ i = pf->next_vsi;
+ while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ i++;
+ if (i >= pf->hw.func_caps.num_vsis) {
+ i = 0;
+ while (i < pf->next_vsi && pf->vsi[i])
+ i++;
+ }
+
+ if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ vsi_idx = i; /* Found one! */
+ } else {
+ ret = -ENODEV;
+ goto err_alloc_vsi; /* out of VSI slots! */
+ }
+ pf->next_vsi = ++i;
+
+ vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+ if (!vsi) {
+ ret = -ENOMEM;
+ goto err_alloc_vsi;
+ }
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__I40E_DOWN, &vsi->state);
+ vsi->flags = 0;
+ vsi->idx = vsi_idx;
+ vsi->rx_itr_setting = pf->rx_itr_default;
+ vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->netdev_registered = false;
+ vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+ INIT_LIST_HEAD(&vsi->mac_filter_list);
+
+ i40e_set_num_rings_in_vsi(vsi);
+
+ /* Setup default MSIX irq handler for VSI */
+ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+ pf->vsi[vsi_idx] = vsi;
+ ret = vsi_idx;
+err_alloc_vsi:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ goto free_vsi;
+ pf = vsi->back;
+
+ mutex_lock(&pf->switch_mutex);
+ if (!pf->vsi[vsi->idx]) {
+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
+ vsi->idx, vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ if (pf->vsi[vsi->idx] != vsi) {
+ dev_err(&pf->pdev->dev,
+ "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+ pf->vsi[vsi->idx]->idx,
+ pf->vsi[vsi->idx],
+ pf->vsi[vsi->idx]->type,
+ vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ /* updates the pf for this cleared vsi */
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+ mutex_unlock(&pf->switch_mutex);
+free_vsi:
+ kfree(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ int i;
+
+ vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->rx_rings) {
+ ret = -ENOMEM;
+ goto err_alloc_rings;
+ }
+
+ vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ kfree(vsi->rx_rings);
+ goto err_alloc_rings;
+ }
+
+ /* Set basic values in the rings to be used later during open() */
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+
+ tx_ring->queue_index = i;
+ tx_ring->reg_idx = vsi->base_queue + i;
+ tx_ring->ring_active = false;
+ tx_ring->vsi = vsi;
+ tx_ring->netdev = vsi->netdev;
+ tx_ring->dev = &pf->pdev->dev;
+ tx_ring->count = vsi->num_desc;
+ tx_ring->size = 0;
+ tx_ring->dcb_tc = 0;
+
+ rx_ring->queue_index = i;
+ rx_ring->reg_idx = vsi->base_queue + i;
+ rx_ring->ring_active = false;
+ rx_ring->vsi = vsi;
+ rx_ring->netdev = vsi->netdev;
+ rx_ring->dev = &pf->pdev->dev;
+ rx_ring->count = vsi->num_desc;
+ rx_ring->size = 0;
+ rx_ring->dcb_tc = 0;
+ if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
+ set_ring_16byte_desc_enabled(rx_ring);
+ else
+ clear_ring_16byte_desc_enabled(rx_ring);
+ }
+
+err_alloc_rings:
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ if (vsi) {
+ kfree(vsi->rx_rings);
+ kfree(vsi->tx_rings);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
+ int err = 0;
+
+ pf->num_msix_entries = 0;
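+ /* Note on the loop below: pci_enable_msix() returns 0 on
+ * success, a negative errno on hard failure, or a positive
+ * count of the vectors actually available when the request
+ * was too large -- hence the shrink-and-retry loop.
+ */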
+ while (vectors >= I40E_MIN_MSIX) {
+ err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
+ if (err == 0) {
+ /* good to go */
+ pf->num_msix_entries = vectors;
+ break;
+ } else if (err < 0) {
+ /* total failure */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector reservation failed: %d\n", err);
+ vectors = 0;
+ break;
+ } else {
+ /* err > 0 is the hint for retry */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vectors wanted %d, retrying with %d\n",
+ vectors, err);
+ vectors = err;
+ }
+ }
+
+ if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ dev_info(&pf->pdev->dev,
+ "Couldn't get enough vectors, only %d available\n",
+ vectors);
+ vectors = 0;
+ }
+
+ return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+ i40e_status err = 0;
+ struct i40e_hw *hw = &pf->hw;
+ int v_budget, i;
+ int vec;
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return -ENODEV;
+
+ /* The number of vectors we'll request will be made up of:
+ * - 1 for the "other" cause (Admin Queue events, etc.)
+ * - The number of LAN queue pairs
+ * already adjusted for the NUMA node
+ * assumes symmetric Tx/Rx pairing
+ * - The number of VMDq pairs
+ * Once we count this up, try the request.
+ *
+ * If we can't get what we want, we'll simplify to nearly nothing
+ * and try again. If that still fails, we punt.
+ */
+ pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+ v_budget = 1 + pf->num_lan_msix;
+ v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+ if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ v_budget++;
+
+ /* Scale down if necessary, and the rings will share vectors */
+ v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+
+ pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!pf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+ vec = i40e_reserve_msix_vectors(pf, v_budget);
+ if (vec < I40E_MIN_MSIX) {
+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ return -ENODEV;
+
+ } else if (vec == I40E_MIN_MSIX) {
+ /* Adjust for minimal MSIX use */
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = 0;
+ pf->num_vmdq_qps = 0;
+ pf->num_vmdq_msix = 0;
+ pf->num_lan_qps = 1;
+ pf->num_lan_msix = 1;
+
+ } else if (vec != v_budget) {
+ /* Scale vector usage down */
+ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
+ vec--; /* reserve the misc vector */
+
+ /* partition out the remaining vectors */
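+ /* (Illustrative reading of the cases below: with 2 vectors
+ * left, LAN and a single VMDq VSI get one vector each; with 3,
+ * LAN gets a second one; otherwise LAN takes roughly half,
+ * capped at num_lan_qps, and the VMDq VSIs share the rest.)
+ */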
+ switch (vec) {
+ case 2:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 1;
+ break;
+ case 3:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 2;
+ break;
+ default:
+ pf->num_lan_msix = min_t(int, (vec / 2),
+ pf->num_lan_qps);
+ pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int v_idx, num_q_vectors;
+
+ /* if not MSIX, give the one vector only to the LAN VSI */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_q_vectors = vsi->num_q_vectors;
+ else if (vsi == pf->vsi[pf->lan_vsi])
+ num_q_vectors = 1;
+ else
+ return -EINVAL;
+
+ vsi->q_vectors = kcalloc(num_q_vectors,
+ sizeof(struct i40e_q_vector),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ return -ENOMEM;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ vsi->q_vectors[v_idx].vsi = vsi;
+ vsi->q_vectors[v_idx].v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
+ i40e_napi_poll, vsi->work_limit);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+ int err = 0;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_init_msix(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ /* rework the queue expectations without MSIX */
+ i40e_determine_queue_usage(pf);
+ }
+ }
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ err = pci_enable_msi(pf->pdev);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI init failed (%d), trying legacy.\n", err);
+ pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ }
+ }
+
+ /* track first vector for misc interrupts */
+ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ /* Only request the irq if this is the first time through, and
+ * not when we're rebuilding after a Reset
+ */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ err = request_irq(pf->msix_entries[0].vector,
+ i40e_intr, 0, pf->misc_int_name, pf);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "request_irq for msix_misc failed: %d\n", err);
+ return -EFAULT;
+ }
+ }
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* associate no queues to the misc vector */
+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+
+ i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ return err;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+ /* Set of random keys generated using the kernel random number generator */
+ static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
+ 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
+ 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+
+ /* The assumption is that lan qp count will be the highest
+ * qp count for any PF VSI that needs RSS.
+ * If multiple VSIs need RSS support, all the qp counts
+ * for those VSIs should be a power of 2 for RSS to work.
+ * If the LAN VSI is the only consumer of RSS then this
+ * requirement does not apply.
+ */
+ if (j == pf->rss_size)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j &
+ ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
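+ /* (Worked example: with rss_size = 4 the window cycles
+ * 0,1,2,3, so the first write below is HLUT(0) = 0x00010203,
+ * i.e. the earliest entry lands in the most significant byte.)
+ */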
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int i40e_sw_init(struct i40e_pf *pf)
+{
+ int err = 0;
+ int size;
+
+ pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+ (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+ if (I40E_DEBUG_USER & debug)
+ pf->hw.debug_mask = debug;
+ pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+ I40E_DEFAULT_MSG_ENABLE);
+ }
+
+ /* Set default capability flags */
+ pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+ I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_RX_1BUF_ENABLED;
+
+ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ if (pf->hw.func_caps.rss) {
+ pf->flags |= I40E_FLAG_RSS_ENABLED;
+ pf->rss_size = min_t(int, pf->rss_size_max,
+ nr_cpus_node(numa_node_id()));
+ } else {
+ pf->rss_size = 1;
+ }
+
+ if (pf->hw.func_caps.dcb)
+ pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
+ else
+ pf->num_tc_qps = 0;
+
+ if (pf->hw.func_caps.fd) {
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Enabled\n");
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ }
+ } else {
+ pf->fdir_pf_filter_count = 0;
+ }
+
+ if (pf->hw.func_caps.vmdq) {
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+ pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ }
+
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
+
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.num_vfs) {
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ pf->num_req_vfs = min_t(int,
+ pf->hw.func_caps.num_vfs,
+ I40E_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
+ pf->eeprom_version = 0xDEAD;
+ pf->lan_veb = I40E_NO_VEB;
+ pf->lan_vsi = I40E_NO_VSI;
+
+ /* set up queue assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+ pf->qp_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->qp_pile) {
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+ pf->qp_pile->search_hint = 0;
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ kfree(pf->qp_pile);
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
+ pf->irq_pile->search_hint = 0;
+
+ mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+ return err;
+}
+
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ **/
+static int i40e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ return 0;
+}
+
+static const struct net_device_ops i40e_netdev_ops = {
+ .ndo_open = i40e_open,
+ .ndo_stop = i40e_close,
+ .ndo_start_xmit = i40e_lan_xmit_frame,
+ .ndo_get_stats64 = i40e_get_netdev_stats_struct,
+ .ndo_set_rx_mode = i40e_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40e_set_mac,
+ .ndo_change_mtu = i40e_change_mtu,
+ .ndo_tx_timeout = i40e_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40e_netpoll,
+#endif
+ .ndo_setup_tc = i40e_setup_tc,
+ .ndo_set_features = i40e_set_features,
+ .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_get_vf_config = i40e_ndo_get_vf_config,
+};
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ int etherdev_size;
+
+ etherdev_size = sizeof(struct i40e_netdev_priv);
+ netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_SG;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ 0;
+
+ /* copy netdev features into list of user-selectable features */
+ netdev->hw_features |= netdev->features;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ } else {
+ /* relate the VSI_VMDQ name to the VSI_MAIN name */
+ snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ random_ether_addr(mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ }
+
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ /* vlan gets same features (except vlan offload)
+ * after any tweaks for specific VSI types
+ */
+ netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* Setup netdev TC information */
+ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+ netdev->netdev_ops = &i40e_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+ i40e_set_ethtool_ops(netdev);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+ /* removing the default VSI is not allowed */
+ if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+ return;
+
+ /* there is no HW VSI for FDIR */
+ if (vsi->type == I40E_VSI_FDIR)
+ return;
+
+ i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+ int ret = -ENODEV;
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int f_count = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ /* The PF's main VSI is already set up as part of the
+ * device initialization, so we'll not bother with
+ * the add_vsi call, but we will retrieve the current
+ * VSI context.
+ */
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return -ENOENT;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+
+ enabled_tc = i40e_pf_get_tc_map(pf);
+
+ /* MFP mode setup queue map and update VSI */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ /* update the local VSI info queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+ } else {
+ /* Default/Main VSI is only enabled for TC0
+ * reconfigure it to enable all TCs that are
+ * available on the port in SFP mode.
+ */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
+ enabled_tc, ret,
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ }
+ }
+ break;
+
+ case I40E_VSI_FDIR:
+ /* no queue mapping or actual HW VSI needed */
+ vsi->info.valid_sections = 0;
+ vsi->seid = 0;
+ vsi->id = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ return 0;
+
+ case I40E_VSI_VMDQ2:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = 0;
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
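+ /* (the LOCAL_LB/ALLOW_LB flags presumably permit VEB-local
+ * loopback so traffic between VMDq VSIs on the same VEB can be
+ * switched in hardware -- an inference from the flag names)
+ */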
+
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_SRIOV:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+ }
+
+ /* If macvlan filters already exist, force them to get loaded */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ f->changed = true;
+ f_count++;
+ }
+ if (f_count) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ pf->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+ /* Update VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get vsi bw info, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ /* VSI is already added so not tearing that up */
+ ret = 0;
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_veb *veb = NULL;
+ struct i40e_pf *pf;
+ u16 uplink_seid;
+ int i, n;
+
+ pf = vsi->back;
+
+ /* release of a VEB-owner or last VSI is not allowed */
+ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+ vsi->seid, vsi->uplink_seid);
+ return -ENODEV;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi] &&
+ !test_bit(__I40E_DOWN, &pf->state)) {
+ dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+ return -ENODEV;
+ }
+
+ uplink_seid = vsi->uplink_seid;
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ if (vsi->netdev) {
+ /* results in a call to i40e_close() */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ } else {
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+ }
+ i40e_vsi_disable_irq(vsi);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan,
+ f->is_vf, f->is_netdev);
+ i40e_sync_vsi_filters(vsi);
+
+ i40e_vsi_delete(vsi);
+ i40e_vsi_free_q_vectors(vsi);
+ i40e_vsi_clear_rings(vsi);
+ i40e_vsi_clear(vsi);
+
+ /* If this was the last thing on the VEB, except for the
+ * controlling VSI, remove the VEB, which puts the controlling
+ * VSI onto the next level down in the switch.
+ *
+ * Well, okay, there's one more exception here: don't remove
+ * the orphan VEBs yet. We'll wait for an explicit remove request
+ * from up the network stack.
+ */
+ for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] &&
+ pf->vsi[i]->uplink_seid == uplink_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ n++; /* count the VSIs */
+ }
+ }
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == uplink_seid)
+ n++; /* count the VEBs */
+ if (pf->veb[i]->seid == uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (n == 0 && veb && veb->uplink_seid != 0)
+ i40e_veb_release(veb);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi->q_vectors) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->seid);
+ return -EEXIST;
+ }
+
+ if (vsi->base_vector) {
+ dev_info(&pf->pdev->dev,
+ "VSI %d has non-zero base vector %d\n",
+ vsi->seid, vsi->base_vector);
+ return -EEXIST;
+ }
+
+ ret = i40e_alloc_q_vectors(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->seid, ret);
+ vsi->num_q_vectors = 0;
+ goto vector_setup_out;
+ }
+
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
+ if (vsi->base_vector < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get q tracking for VSI %d, err=%d\n",
+ vsi->seid, vsi->base_vector);
+ i40e_vsi_free_q_vectors(vsi);
+ ret = -ENOENT;
+ goto vector_setup_out;
+ }
+
+vector_setup_out:
+ return ret;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds the
+ * VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink_seid, u32 param1)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb = NULL;
+ int ret, i;
+ int v_idx;
+
+ /* The requested uplink_seid must be either
+ * - the PF's port seid
+ * no VEB is needed because this is the PF
+ * or this is a Flow Director special case VSI
+ * - seid of an existing VEB
+ * - seid of a VSI that owns an existing VEB
+ * - seid of a VSI that doesn't own a VEB
+ * a new VEB is created and the VSI becomes the owner
+ * - seid of the PF VSI, which is what creates the first VEB
+ * this is a special case of the previous
+ *
+ * Find which uplink_seid we were given and create a new VEB if needed
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+ veb = pf->veb[i];
+ break;
+ }
+ }
+
+ if (!veb && uplink_seid != pf->mac_seid) {
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+ vsi = pf->vsi[i];
+ break;
+ }
+ }
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+ uplink_seid);
+ return NULL;
+ }
+
+ if (vsi->uplink_seid == pf->mac_seid)
+ veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (!veb) {
+ dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+ return NULL;
+ }
+
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ uplink_seid = veb->seid;
+ }
+
+ /* get vsi sw struct */
+ v_idx = i40e_vsi_mem_alloc(pf, type);
+ if (v_idx < 0)
+ goto err_alloc;
+ vsi = pf->vsi[v_idx];
+ vsi->type = type;
+ vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+ if (type == I40E_VSI_MAIN)
+ pf->lan_vsi = v_idx;
+ else if (type == I40E_VSI_SRIOV)
+ vsi->vf_id = param1;
+ /* assign it some queues */
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* get a VSI from the hardware */
+ vsi->uplink_seid = uplink_seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ /* setup the netdev if needed */
+ case I40E_VSI_MAIN:
+ case I40E_VSI_VMDQ2:
+ ret = i40e_config_netdev(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_netdev;
+ vsi->netdev_registered = true;
+ netif_carrier_off(vsi->netdev);
+ /* fall through */
+
+ case I40E_VSI_FDIR:
+ /* set up vectors and rings if needed */
+ ret = i40e_vsi_setup_vectors(vsi);
+ if (ret)
+ goto err_msix;
+
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+
+ i40e_vsi_reset_stats(vsi);
+ break;
+
+ default:
+ /* no netdev or rings for the other VSI types */
+ break;
+ }
+
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+err_msix:
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_netdev:
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+err_alloc:
+ return NULL;
+}
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret = 0;
+ int i;
+
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw ets config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+ veb->bw_max_quanta = ets_data.tc_bw_max;
+ veb->is_abs_credits = bw_data.absolute_credits_enable;
+ tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
+ (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+ veb->bw_tc_limit_credits[i] =
+ le16_to_cpu(bw_data.tc_bw_limits[i]);
+ veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns veb index in PF (positive)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+ int ret = -ENOENT;
+ struct i40e_veb *veb;
+ int i;
+
+ /* Need to protect the allocation of switch elements at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VEB list may be fragmented if VEB creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free slots in the list.
+ *
+ * find next empty veb slot, looping back around if necessary
+ */
+ i = 0;
+ while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+ i++;
+ if (i >= I40E_MAX_VEB) {
+ ret = -ENOMEM;
+ goto err_alloc_veb; /* out of VEB slots! */
+ }
+
+ veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+ if (!veb) {
+ ret = -ENOMEM;
+ goto err_alloc_veb;
+ }
+ veb->pf = pf;
+ veb->idx = i;
+ veb->enabled_tc = 1;
+
+ pf->veb[i] = veb;
+ ret = i;
+err_alloc_veb:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be
+ * removed, deleting from the tips inward until we can finally
+ * delete this VEB itself.
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+ struct i40e_pf *pf = branch->pf;
+ u16 branch_seid = branch->seid;
+ u16 veb_idx = branch->idx;
+ int i;
+
+ /* release any VEBs on this VEB - RECURSION */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == branch->seid)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Release the VSIs on this VEB, but not the owner VSI.
+ *
+ * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+ * the VEB itself, so don't use (*branch) after this loop.
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (!pf->vsi[i])
+ continue;
+ if (pf->vsi[i]->uplink_seid == branch_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ i40e_vsi_release(pf->vsi[i]);
+ }
+ }
+
+ /* There's one corner case where the VEB might not have been
+ * removed, so double check it here and remove it if needed.
+ * This case happens if the veb was created from the debugfs
+ * commands and no VSIs were added to it.
+ */
+ if (pf->veb[veb_idx])
+ i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+ if (!veb)
+ return;
+
+ if (veb->pf) {
+ struct i40e_pf *pf = veb->pf;
+
+ mutex_lock(&pf->switch_mutex);
+ if (pf->veb[veb->idx] == veb)
+ pf->veb[veb->idx] = NULL;
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+ kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_pf *pf;
+ int i, n = 0;
+
+ pf = veb->pf;
+
+ /* find the remaining VSI and check for extras */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ n++;
+ vsi = pf->vsi[i];
+ }
+ }
+ if (n != 1) {
+ dev_info(&pf->pdev->dev,
+ "can't remove VEB %d with %d VSIs left\n",
+ veb->seid, n);
+ return;
+ }
+
+ /* move the remaining VSI to uplink veb */
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ if (veb->uplink_seid) {
+ vsi->uplink_seid = veb->uplink_seid;
+ if (veb->uplink_seid == pf->mac_seid)
+ vsi->veb_idx = I40E_NO_VEB;
+ else
+ vsi->veb_idx = veb->veb_idx;
+ } else {
+ /* floating VEB */
+ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ }
+
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ i40e_veb_clear(veb);
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+ bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ int ret;
+
+ /* get a VEB from the hardware */
+ ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ veb->enabled_tc, is_default, &veb->seid, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't add VEB, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+
+ /* get statistics counter */
+ ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB bw info, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ return -ENOENT;
+ }
+
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+ return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB. It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+ u16 uplink_seid, u16 vsi_seid,
+ u8 enabled_tc)
+{
+ struct i40e_veb *veb, *uplink_veb = NULL;
+ int vsi_idx, veb_idx;
+ int ret;
+
+ /* if one seid is 0, the other must be 0 to create a floating relay */
+ if ((uplink_seid == 0 || vsi_seid == 0) &&
+ (uplink_seid + vsi_seid != 0)) {
+ dev_info(&pf->pdev->dev,
+ "one, not both seid's are 0: uplink=%d vsi=%d\n",
+ uplink_seid, vsi_seid);
+ return NULL;
+ }
+
+ /* make sure there is such a vsi and uplink */
+ for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+ break;
+ if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
+ return NULL;
+ }
+
+ if (uplink_seid && uplink_seid != pf->mac_seid) {
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] &&
+ pf->veb[veb_idx]->seid == uplink_seid) {
+ uplink_veb = pf->veb[veb_idx];
+ break;
+ }
+ }
+ if (!uplink_veb) {
+ dev_info(&pf->pdev->dev,
+ "uplink seid %d not found\n", uplink_seid);
+ return NULL;
+ }
+ }
+
+ /* get veb sw struct */
+ veb_idx = i40e_veb_mem_alloc(pf);
+ if (veb_idx < 0)
+ goto err_alloc;
+ veb = pf->veb[veb_idx];
+ veb->flags = flags;
+ veb->uplink_seid = uplink_seid;
+ veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+ veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+ /* create the VEB in the switch */
+ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ if (ret)
+ goto err_veb;
+
+ return veb;
+
+err_veb:
+ i40e_veb_clear(veb);
+err_alloc:
+ return NULL;
+}
+
+/**
+ * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * helper function for extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+ struct i40e_aqc_switch_config_element_resp *ele,
+ u16 num_reported, bool printconfig)
+{
+ u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+ u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+ u8 element_type = ele->element_type;
+ u16 seid = le16_to_cpu(ele->seid);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "type=%d seid=%d uplink=%d downlink=%d\n",
+ element_type, seid, uplink_seid, downlink_seid);
+
+ switch (element_type) {
+ case I40E_SWITCH_ELEMENT_TYPE_MAC:
+ pf->mac_seid = seid;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VEB:
+ /* Main VEB? */
+ if (uplink_seid != pf->mac_seid)
+ break;
+ if (pf->lan_veb == I40E_NO_VEB) {
+ int v;
+
+ /* find existing or else empty VEB */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+ pf->lan_veb = v;
+ break;
+ }
+ }
+ if (pf->lan_veb == I40E_NO_VEB) {
+ v = i40e_veb_mem_alloc(pf);
+ if (v < 0)
+ break;
+ pf->lan_veb = v;
+ }
+ }
+
+ pf->veb[pf->lan_veb]->seid = seid;
+ pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+ pf->veb[pf->lan_veb]->pf = pf;
+ pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VSI:
+ if (num_reported != 1)
+ break;
+ /* This is immediately after a reset so we can assume this is
+ * the PF's VSI
+ */
+ pf->mac_seid = uplink_seid;
+ pf->pf_seid = downlink_seid;
+ pf->main_vsi_seid = seid;
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "pf_seid=%d main_vsi_seid=%d\n",
+ pf->pf_seid, pf->main_vsi_seid);
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_PF:
+ case I40E_SWITCH_ELEMENT_TYPE_VF:
+ case I40E_SWITCH_ELEMENT_TYPE_EMP:
+ case I40E_SWITCH_ELEMENT_TYPE_BMC:
+ case I40E_SWITCH_ELEMENT_TYPE_PE:
+ case I40E_SWITCH_ELEMENT_TYPE_PA:
+ /* ignore these for now */
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+ element_type, seid);
+ break;
+ }
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u16 next_seid = 0;
+ int ret = 0;
+ u8 *aq_buf;
+ int i;
+
+ aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ if (!aq_buf)
+ return -ENOMEM;
+
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
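+ /* The switch config may not fit in one AQ buffer; next_seid is
+ * the continuation point, so keep fetching until the firmware
+ * reports no more elements (next_seid == 0).
+ */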
+ do {
+ u16 num_reported, num_total;
+
+ ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+ I40E_AQ_LARGE_BUF,
+ &next_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "get switch config failed %d aq_err=%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(aq_buf);
+ return -ENOENT;
+ }
+
+ num_reported = le16_to_cpu(sw_config->header.num_reported);
+ num_total = le16_to_cpu(sw_config->header.num_total);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "header: %d reported %d total\n",
+ num_reported, num_total);
+
+ if (num_reported) {
+ int sz = sizeof(*sw_config) * num_reported;
+
+ kfree(pf->sw_config);
+ pf->sw_config = kzalloc(sz, GFP_KERNEL);
+ if (pf->sw_config)
+ memcpy(pf->sw_config, sw_config, sz);
+ }
+
+ for (i = 0; i < num_reported; i++) {
+ struct i40e_aqc_switch_config_element_resp *ele =
+ &sw_config->element[i];
+
+ i40e_setup_pf_switch_element(pf, ele, num_reported,
+ printconfig);
+ }
+ } while (next_seid != 0);
+
+ kfree(aq_buf);
+ return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf)
+{
+ int ret;
+
+ /* find out what's out there already */
+ ret = i40e_fetch_switch_configuration(pf, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't fetch switch config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+ i40e_pf_reset_stats(pf);
+
+ /* fdir VSI must happen first to be sure it gets queue 0, but only
+ * if there is enough room for the fdir VSI
+ */
+ if (pf->num_lan_qps > 1)
+ i40e_fdir_setup(pf);
+
+ /* first time setup */
+ if (pf->lan_vsi == I40E_NO_VSI) {
+ struct i40e_vsi *vsi = NULL;
+ u16 uplink_seid;
+
+ /* Set up the PF VSI associated with the PF's main VSI
+ * that is already in the HW switch
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ uplink_seid = pf->veb[pf->lan_veb]->seid;
+ else
+ uplink_seid = pf->mac_seid;
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+ i40e_fdir_teardown(pf);
+ return -EAGAIN;
+ }
+ /* accommodate kcompat by copying the main VSI queue count
+ * into the pf, since this newer code pushes the pf queue
+ * info down a level into a VSI
+ */
+ pf->num_rx_queues = vsi->alloc_queue_pairs;
+ pf->num_tx_queues = vsi->alloc_queue_pairs;
+ } else {
+ /* force a reset of TC and queue layout configurations */
+ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ }
+ i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+ /* Setup static PF queue filter control settings */
+ ret = i40e_setup_pf_filter_control(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+ ret);
+ /* Failure here should not stop continuing other steps */
+ }
+
+ /* enable RSS in the HW, even for only one queue, as the stack can use
+ * the hash
+ */
+ if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ i40e_config_rss(pf);
+
+ /* fill in link information and enable LSE reporting */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+
+ /* Initialize user-specific link properties */
+ pf->fc_autoneg_status = !!(pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED);
+ pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
+ if (pf->hw.phy.link_info.an_info &
+ (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ pf->hw.fc.current_mode = I40E_FC_FULL;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_size - helper to set rss_size
+ * @pf: board private structure
+ * @queues_left: how many queues are still available
+ **/
+static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
+{
+ int num_tc0;
+
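+ /* bound by what's available, by the device RSS maximum, and by
+ * the CPUs on the local NUMA node, then round down to a power
+ * of two
+ */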
+ num_tc0 = min_t(int, queues_left, pf->rss_size_max);
+ num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
+ num_tc0 = rounddown_pow_of_two(num_tc0);
+
+ return num_tc0;
+}
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+ int accum_tc_size;
+ int queues_left;
+
+ pf->num_lan_qps = 0;
+ pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
+ accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
+
+ /* Find the max queues to be put into basic use. We'll always be
+ * using TC0, whether or not DCB is running, and TC0 will get the
+ * big RSS set.
+ */
+ queues_left = pf->hw.func_caps.num_tx_qp;
+
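+ /* The branches below carve up queues_left in order: single-queue
+ * fallback, RSS only, RSS+DCB, RSS+FD, and RSS+FD+DCB; anything
+ * left over is then offered to SR-IOV VFs and VMDq.
+ */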
+ if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
+ (queues_left == 1)) {
+
+ /* one qp for PF, no queues for anything else */
+ queues_left = 0;
+ pf->rss_size = pf->num_lan_qps = 1;
+
+ /* make sure all the fancies are disabled */
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save num_tc_qps queues for each of TCs 1 thru 7 and the
+ * rest are set up for RSS in TC0
+ */
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ queues_left -= 1; /* save 1 queue for FD */
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save num_tc_qps queues for each of TCs 1 thru 7,
+ * 1 queue for flow director,
+ * and the rest are set up for RSS in TC0
+ */
+ queues_left -= 1;
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Invalid configuration, flags=0x%08llx\n", pf->flags);
+ return;
+ }
+
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
+ pf->num_vf_qps));
+ queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+ }
+
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+ pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+ (queues_left / pf->num_vmdq_qps));
+ queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+ }
+}
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * settings. If PE/FCoE are enabled then it will also set the per-PF
+ * filter sizes required for them. It also enables the Flow Director,
+ * ethertype and macvlan type filter settings for the pf.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+ struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+ settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+ /* Flow Director is enabled */
+ if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ settings->enable_fdir = true;
+
+ /* Ethtype and MACVLAN filters enabled for PF */
+ settings->enable_ethtype = true;
+ settings->enable_macvlan = true;
+
+ if (i40e_set_filter_control(&pf->hw, settings))
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a pf identified by a pci_dev structure.
+ * The OS initialization, configuring of the pf private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct i40e_driver_version dv;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int err = 0;
+ u32 len;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* set up for high or low dma */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ err = -EIO;
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ goto err_dma;
+ }
+
+ /* set up pci connections */
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), i40e_driver_name);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ /* Now that we have a PCI connection, we need to do the
+ * low level device setup. This is primarily setting up
+ * the Admin Queue structures and then querying for the
+ * device's current profile information.
+ */
+ pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf) {
+ err = -ENOMEM;
+ goto err_pf_alloc;
+ }
+ pf->next_vsi = 0;
+ pf->pdev = pdev;
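+ /* stay marked down until setup completes so nothing runs early */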
+ set_bit(__I40E_DOWN, &pf->state);
+
+ hw = &pf->hw;
+ hw->back = pf;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+ (unsigned int)pci_resource_start(pdev, 0),
+ (unsigned int)pci_resource_len(pdev, 0), err);
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ /* Reset here to make sure all is clean and to define PF 'n' */
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
+
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+ snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+ "%s-pf%d:misc",
+ dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+
+ err = i40e_init_shared_code(hw);
+ if (err) {
+ dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_init_adminq(hw);
+ dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (err) {
+ dev_info(&pdev->dev,
+ "init_adminq failed: %d expecting API %02x.%02x\n",
+ err,
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+ goto err_pf_reset;
+ }
+
+ err = i40e_get_capabilities(pf);
+ if (err)
+ goto err_adminq_setup;
+
+ err = i40e_sw_init(pf);
+ if (err) {
+ dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (err) {
+ dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+ goto err_init_lan_hmc;
+ }
+
+ err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (err) {
+ dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+ err = -ENOENT;
+ goto err_configure_lan_hmc;
+ }
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr)) {
+ dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+ err = -EIO;
+ goto err_mac_addr;
+ }
+ dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+
+ pci_set_drvdata(pdev, pf);
+ pci_save_state(pdev);
+
+ /* set up periodic task facility */
+ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+ pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ pf->link_check_timeout = jiffies;
+
+ /* set up the main switch operations */
+ i40e_determine_queue_usage(pf);
+ i40e_init_interrupt_scheme(pf);
+
+ /* Set up the *vsi struct based on the number of VSIs in the HW,
+ * and set up our local tracking of the MAIN PF vsi.
+ */
+ len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ pf->vsi = kzalloc(len, GFP_KERNEL);
+ if (!pf->vsi)
+ goto err_switch_setup;
+
+ err = i40e_setup_pf_switch(pf);
+ if (err) {
+ dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+ goto err_vsis;
+ }
+
+ /* The main driver is (mostly) up and happy. We need to set this state
+ * before setting up the misc vector or we get a race and the vector
+ * ends up disabled forever.
+ */
+ clear_bit(__I40E_DOWN, &pf->state);
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+ if (err) {
+ dev_info(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ goto err_vsis;
+ }
+ }
+
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ u32 val;
+
+ /* disable link interrupts for VFs */
+ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ i40e_flush(hw);
+ }
+
+ i40e_dbg_pf_init(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+ /* Unwind what we've done if something failed in the setup */
+err_vsis:
+ set_bit(__I40E_DOWN, &pf->state);
+err_switch_setup:
+ i40e_clear_interrupt_scheme(pf);
+ kfree(pf->vsi);
+ del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+err_sw_init:
+err_adminq_setup:
+ (void)i40e_shutdown_adminq(hw);
+err_pf_reset:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ kfree(pf);
+err_pf_alloc:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40e_remove(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ i40e_status ret_code;
+ u32 reg;
+ int i;
+
+ i40e_dbg_pf_exit(pf);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
+ /* no more scheduling of any task */
+ set_bit(__I40E_DOWN, &pf->state);
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ i40e_fdir_teardown(pf);
+
+ /* If there is a switch structure or any orphans, remove them.
+ * This will leave only the PF's VSI remaining.
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+
+ if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+ pf->veb[i]->uplink_seid == 0)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Now we can shutdown the PF's VSI, just before we kill
+ * adminq and hmc.
+ */
+ if (pf->vsi[pf->lan_vsi])
+ i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
+ /* shutdown and destroy the HMC */
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n", ret_code);
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(&pf->hw, true);
+ ret_code = i40e_shutdown_adminq(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the Admin Queue resources: %d\n",
+ ret_code);
+
+ /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ i40e_clear_interrupt_scheme(pf);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i]) {
+ i40e_vsi_clear_rings(pf->vsi[i]);
+ i40e_vsi_clear(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ kfree(pf->veb[i]);
+ pf->veb[i] = NULL;
+ }
+
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+ kfree(pf->sw_config);
+ kfree(pf->vsi);
+
+ /* force a PF reset to clean anything leftover */
+ reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
+ wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ i40e_flush(&pf->hw);
+
+ iounmap(pf->hw.hw_addr);
+ kfree(pf);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress. Allows the driver to quiesce things and be ready
+ * for remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+ /* shutdown all operations */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to find if the driver can work with the device now that
+ * the pci slot has been reset. If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ if (pci_enable_device_mem(pdev)) {
+ dev_info(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
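+ /* a readable, all-zero reset-trigger register is taken as a
+ * sign the device survived the slot reset
+ */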
+ reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ if (reg == 0)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ i40e_handle_reset_warning(pf);
+}
+
+static const struct pci_error_handlers i40e_err_handler = {
+ .error_detected = i40e_pci_error_detected,
+ .slot_reset = i40e_pci_error_slot_reset,
+ .resume = i40e_pci_error_resume,
+};
+
+static struct pci_driver i40e_driver = {
+ .name = i40e_driver_name,
+ .id_table = i40e_pci_tbl,
+ .probe = i40e_probe,
+ .remove = i40e_remove,
+ .err_handler = &i40e_err_handler,
+ .sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+ pr_info("%s: %s - version %s\n", i40e_driver_name,
+ i40e_driver_string, i40e_driver_version_str);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+ i40e_dbg_init();
+ return pci_register_driver(&i40e_driver);
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+ pci_unregister_driver(&i40e_driver);
+ i40e_dbg_exit();
+}
+module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 00000000000..97e1bb30ef8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,391 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers.
+ * @hw: pointer to the HW structure.
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all functions covered
+ * in this file) for the FLASH part mapped into the SR.
+ * The FLASH is always accessed through the Shadow RAM.
+ **/
+i40e_status i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ i40e_status ret_code = 0;
+ u32 fla, gens;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Convert to words (the register field encodes the SR size as
+ * a power of two, in KB).
+ */
+ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode. */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
+ /* Max NVM timeout. */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode. */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
+ * @hw: pointer to the HW structure.
+ * @access: NVM access type (read or write).
+ *
+ * This function will request NVM ownership for reading
+ * via the proper Admin Command.
+ **/
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ i40e_status ret_code = 0;
+ u64 gtime, timeout;
+ u64 time = 0;
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time, NULL);
+ /* Reading the Global Device Timer. */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout. */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
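+ /* A failed request means another function currently owns the
+ * resource; the time value returned by the AQ bounds how long
+ * we poll before giving up.
+ */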
+ if (ret_code) {
+ /* Set the polling timeout. */
+ if (time > I40E_MAX_NVM_TIMEOUT)
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ + gtime;
+ else
+ timeout = hw->nvm.hw_semaphore_timeout;
+ /* Poll until the current NVM owner times out. */
+ while (gtime < timeout) {
+ usleep_range(10000, 20000);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time,
+ NULL);
+ if (!ret_code) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time) + gtime;
+ break;
+ }
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ }
+ if (ret_code) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ hw->nvm.hw_semaphore_wait =
+ I40E_MS_TO_GTIME(time) + gtime;
+ hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
+ time);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership.
+ * @hw: pointer to the HW structure.
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ if (!hw->nvm.blank_nvm_mode)
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
+ * @hw: pointer to the HW structure.
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = 0;
+ break;
+ }
+ udelay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ if (offset >= hw->nvm.sr_size) {
+ hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ /* Write the address and start reading. */
+ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code)
+ hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM word.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16 bit word from the Shadow RAM. Each read is preceded
+ * by taking the NVM ownership and followed by its release.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: number of words to read (in) and
+ *         number of words read before the NVM ownership timeout (out).
+ * @data: words read from the Shadow RAM.
+ *
+ * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_srctl().
+ * The buffer read is preceded by taking the NVM ownership and followed
+ * by its release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+ u16 index, word;
+ u32 time;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ /* Loop thru the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
+ if (ret_code)
+ break;
+ /* Check that we haven't exceeded the semaphore timeout. */
+ time = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (time >= hw->nvm.hw_semaphore_timeout) {
+ ret_code = I40E_ERR_TIMEOUT;
+ hw_dbg(hw, "NVM read error: timeout.\n");
+ break;
+ }
+ }
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+ /* Release the NVM ownership. */
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum to be returned
+ *
+ * This function calculates the SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of the VPD module are customer specific and unknown, so this function skips
+ * the maximum possible VPD size (1kB).
+ **/
+static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 word = 0;
+ u32 i = 0;
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ i++;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i == (u32)vpd_module) {
+ i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i == (u32)pcie_alt_module) {
+ i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+
+ ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ checksum_local += word;
+ }
+
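+ /* the checksum word is defined so that it plus the sum of all
+ * other (non-skipped) words equals I40E_SR_SW_CHECKSUM_BASE
+ */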
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, the pointer can be NULL.
+ **/
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 checksum_sr = 0;
+ u16 checksum_local;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_exit;
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_free;
+
+ /* Do not use i40e_read_nvm_word() because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_free:
+ i40e_release_nvm(hw);
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 00000000000..702c81ba86e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file provides the glue between the shared code and
+ * the actual OS primitives.
+ */
+
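+/* shared-code debug output is compiled out in this driver */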
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
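+/* reading any register flushes pending posted writes to the device;
+ * GLGEN_STAT is used here as a harmless register to read
+ */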
+#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 00000000000..f75bb9ccc90
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,239 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+i40e_status i40e_init_adminq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode);
+
+/* admin send queue commands */
+
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+/* i40e_common */
+i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr);
+i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+/* prototype for functions used for NVM access */
+i40e_status i40e_init_nvm(struct i40e_hw *hw);
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers*/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 00000000000..6bd333cde28
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,4688 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000CC500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000CCE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000CCF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000CD000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000CD100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
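
The three PFHMC_SD* registers above act as a single command interface: software stages a segment descriptor in SDDATAHIGH/SDDATALOW (valid bit, type, backing-page count, and the low address bits all packed into the low word) and commits it by writing the SD index with PMSDWR set into SDCMD. A minimal sketch of that sequence, assuming the driver's usual wr32() 32-bit write helper and hypothetical sd_index/pa/pg_cnt inputs (pa assumed 4 KiB aligned):

    /* Illustrative only: commit one HMC segment descriptor.
     * wr32(hw, reg, val) is assumed; sd_index, pa and pg_cnt
     * are hypothetical inputs describing the descriptor. */
    u32 low = ((u32)pa & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK) |
              I40E_PFHMC_SDDATALOW_PMSDVALID_MASK |
              ((pg_cnt << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) &
               I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK);

    wr32(hw, I40E_PFHMC_SDDATAHIGH, (u32)(pa >> 32));
    wr32(hw, I40E_PFHMC_SDDATALOW, low);
    wr32(hw, I40E_PFHMC_SDCMD, I40E_PFHMC_SDCMD_PMSDWR_MASK |
         (sd_index & I40E_PFHMC_SDCMD_PMSDIDX_MASK));
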
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
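
Taken together, the DYN_CTL0 fields form the usual interrupt re-enable write for the PF's miscellaneous vector: assert INTENA, request a pending-bit-array clear, and select an ITR index. A minimal sketch; the 0x3 "no ITR update" index is an assumption from the driver family's convention, not something defined in this block:

    /* Illustrative re-enable of the PF misc interrupt. */
    u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
              I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
              (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

    wr32(hw, I40E_PFINT_DYN_CTL0, val);
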
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
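
The ICR0 cause bits are consumed by reading the register once and testing the individual masks; a sketch, assuming an rd32() read helper to match wr32():

    /* Illustrative decode of pending interrupt causes. */
    u32 icr0 = rd32(hw, I40E_PFINT_ICR0);

    if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
            ; /* service the admin queue */
    if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)
            ; /* refresh link status */
    if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
            ; /* a VF function-level reset occurred */
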
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
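
PFINT_ITRN is a two-dimensional array macro: _i selects one of the three ITR timers (2048-byte stride) and _INTPF the MSI-X vector (4-byte stride), so each (timer, vector) pair resolves to its own dword. Programming ITR timer 1 of vector 5 might look like this (the interval granularity is hardware-defined and not described by these defines):

    /* Illustrative: load 'interval' into ITR timer 1 of
     * PF MSI-X vector 5. */
    wr32(hw, I40E_PFINT_ITRN(1, 5),
         interval & I40E_PFINT_ITRN_INTERVAL_MASK);
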
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
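
The NEXTQ fields in RQCTL/TQCTL suggest a per-vector linked list of queue causes: each entry names its MSI-X vector and ITR index, points at the next queue on the same vector, and is armed with CAUSE_ENA. A sketch that hangs RX queue qp on vector v and chains its paired TX queue next; v, qp, itr_idx and QTYPE_TX (0x1, from the driver's queue-type convention) are assumptions here:

    /* Illustrative: link RX queue 'qp' to MSI-X vector 'v',
     * with TX queue 'qp' as the next element on the vector. */
    u32 val = (v << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
              (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
              (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
              (QTYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
              I40E_QINT_RQCTL_CAUSE_ENA_MASK;

    wr32(hw, I40E_QINT_RQCTL(qp), val);
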
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
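
Read as its field names suggest, PFLAN_QALLOC reports the contiguous LAN queue window owned by the PF: FIRSTQ/LASTQ bound the range and VALID qualifies it. Under that reading, a decode sketch:

    /* Illustrative decode of the PF's LAN queue allocation. */
    u32 qalloc = rd32(hw, I40E_PFLAN_QALLOC);
    u32 firstq = (qalloc & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
                 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
    u32 lastq = (qalloc & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
                I40E_PFLAN_QALLOC_LASTQ_SHIFT;
    u32 num_queues = 0;

    if (qalloc & I40E_PFLAN_QALLOC_VALID_MASK)
            num_queues = lastq - firstq + 1;
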
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
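
QTX_ENA (like QRX_ENA above) pairs a request bit with a status bit: software sets QENA_REQ and polls until hardware mirrors the new state in QENA_STAT. A minimal sketch; the retry budget and delay are arbitrary choices, and udelay() is the kernel's usual busy-wait:

    /* Illustrative TX queue enable with a bounded poll. */
    u32 reg = rd32(hw, I40E_QTX_ENA(q)) | I40E_QTX_ENA_QENA_REQ_MASK;
    int i;

    wr32(hw, I40E_QTX_ENA(q), reg);
    for (i = 0; i < 10; i++) {
            if (rd32(hw, I40E_QTX_ENA(q)) & I40E_QTX_ENA_QENA_STAT_MASK)
                    break;
            udelay(10);
    }
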
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _Q=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15 */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
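
PRTGL_SAH packs two unrelated fields: the upper bits of the flow-control station address and, in MFS, what reads as the port's maximum frame size. A driver might fetch it like so:

    /* Illustrative read of the port's max frame size (MFS). */
    u32 mfs = (rd32(hw, I40E_PRTGL_SAH) & I40E_PRTGL_SAH_MFS_MASK) >>
              I40E_PRTGL_SAH_MFS_SHIFT;
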
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
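+/*
+ * Editorial usage sketch (not part of the hardware spec): every field in
+ * this file follows the same _SHIFT/_MASK pairing, so extracting a field is
+ * a mask-then-shift, e.g. for the L2 ethertype AND filter above, assuming a
+ * hypothetical rd32() register accessor and hw handle:
+ *
+ *	u32 reg = rd32(hw, I40E_PRT_MNG_MDEF_EXT(0));
+ *	u32 etype_and = (reg & I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK)
+ *			>> I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT;
+ */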
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
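+/*
+ * Layout note (editorial inference from the offsets, hedged): the
+ * TADD/TUADD/TMSG/TVCTRL registers above sit at offsets 0x0/0x4/0x8/0xC
+ * within a 16-byte stride, which matches the standard PCI MSI-X table
+ * entry layout: message address low, message address high, message data,
+ * and vector control, with the per-vector mask bit in TVCTRL.MASK.
+ */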
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
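+/*
+ * Usage sketch, assumed from the bit names rather than quoted from a
+ * datasheet: a shadow-RAM word read via GLNVM_SRCTL/GLNVM_SRDATA would
+ * place the word offset in ADDR, set START, poll DONE, then pull the
+ * result out of SRDATA.RDDATA (rd32/wr32/udelay are hypothetical here):
+ *
+ *	wr32(hw, I40E_GLNVM_SRCTL,
+ *	     (offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ *	     (1 << I40E_GLNVM_SRCTL_START_SHIFT));
+ *	while (!(rd32(hw, I40E_GLNVM_SRCTL) & I40E_GLNVM_SRCTL_DONE_MASK))
+ *		udelay(5);
+ *	u16 word = (rd32(hw, I40E_GLNVM_SRDATA) &
+ *		    I40E_GLNVM_SRDATA_RDDATA_MASK)
+ *		   >> I40E_GLNVM_SRDATA_RDDATA_SHIFT;
+ */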
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
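+/*
+ * Decoding note (editorial): PF_FUNC_RID packs the function's PCI requester
+ * ID in the usual function/device/bus order; a hedged sketch of pulling out
+ * the bus number, again assuming a hypothetical rd32() accessor:
+ *
+ *	u32 rid = rd32(hw, I40E_PF_FUNC_RID);
+ *	u8 bus = (rid & I40E_PF_FUNC_RID_BUS_NUMBER_MASK)
+ *		 >> I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT;
+ */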
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
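+/*
+ * Indexing note (editorial): the I40E_VFPE_* macros above take the VF
+ * number and step 4 bytes per VF, so a per-VF access is a plain indexed
+ * write; a hypothetical completion-queue arm sketch:
+ *
+ *	wr32(hw, I40E_VFPE_CQARM(vf_id),
+ *	     cq_id << I40E_VFPE_CQARM_PECQID_SHIFT);
+ */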
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
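+/*
+ * Counter note (editorial inference from the LO/HI masks): these GLPES
+ * statistics are wide counters split across an 8-byte-strided LO/HI pair,
+ * with the HI half truncated by its field mask; a hedged read sketch:
+ *
+ *	u64 pkts = rd32(hw, I40E_GLPES_PFIP4RXPKTSLO(pf_id));
+ *	pkts |= (u64)(rd32(hw, I40E_GLPES_PFIP4RXPKTSHI(pf_id)) &
+ *		      I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK) << 32;
+ */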
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
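
The statistics registers above come in LO/HI pairs: the LO word carries the low 32 bits of a counter and the HI word carries the remainder (16 bits here, per the 0xFFFF masks). A minimal read sketch, assuming hw_addr is the ioremap()'d register base, <linux/io.h> is included, and the function name is illustrative:

static u64 i40e_read_pf_udp_tx_pkts(void __iomem *hw_addr, unsigned int pf)
{
	/* read the low word, then keep only the valid bits of the high word */
	u32 lo = readl(hw_addr + I40E_GLPES_PFUDPTXPKTSLO(pf));
	u32 hi = readl(hw_addr + I40E_GLPES_PFUDPTXPKTSHI(pf)) &
		 I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK;

	return ((u64)hi << 32) | lo;
}
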
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPM_DMACR 0x000881F4
+#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
+#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
+#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
+#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
+#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
+#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
+#define I40E_GLPM_LTRC 0x000BE500
+#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
+#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
+#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
+#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
+#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
+#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
+#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
+#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
+#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
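
Every *_SHIFT/*_MASK pair here follows the same idiom: field = (reg & MASK) >> SHIFT. A sketch pulling the snoop and no-snoop latency-tolerance values out of GLPM_LTRC, with hw_addr and includes as in the earlier sketch (the function name is illustrative):

static void i40e_get_ltr_values(void __iomem *hw_addr, u32 *sltrv, u32 *nsltrv)
{
	u32 ltrc = readl(hw_addr + I40E_GLPM_LTRC);

	/* extract each field by masking first, then shifting down */
	*sltrv = (ltrc & I40E_GLPM_LTRC_SLTRV_MASK) >> I40E_GLPM_LTRC_SLTRV_SHIFT;
	*nsltrv = (ltrc & I40E_GLPM_LTRC_NSLTRV_MASK) >> I40E_GLPM_LTRC_NSLTRV_SHIFT;
}
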
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_HPTC 0x000AC800
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
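
Writing a field is the mirror image: read, clear the masked bits, then OR in the shifted value. A sketch updating the 20-bit global high watermark (GHW), same assumptions as above:

static void i40e_set_global_high_wm(void __iomem *hw_addr, u32 watermark)
{
	u32 val = readl(hw_addr + I40E_GLRPB_GHW);

	/* read-modify-write so the rest of the register is preserved */
	val &= ~I40E_GLRPB_GHW_GHW_MASK;
	val |= (watermark << I40E_GLRPB_GHW_GHW_SHIFT) & I40E_GLRPB_GHW_GHW_MASK;
	writel(val, hw_addr + I40E_GLRPB_GHW);
}
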
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
+#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
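
Each PFQF_HKEY register carries four key bytes (KEY_0..KEY_3 at bit offsets 0, 8, 16, 24), so the thirteen registers hold a 52-byte RSS hash key. A sketch loading one, assuming the key is supplied as a plain byte array (function name illustrative):

static void i40e_write_rss_key(void __iomem *hw_addr, const u8 key[52])
{
	u32 i;

	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
		/* pack four consecutive key bytes into one register */
		u32 val = (u32)key[4 * i] << I40E_PFQF_HKEY_KEY_0_SHIFT |
			  (u32)key[4 * i + 1] << I40E_PFQF_HKEY_KEY_1_SHIFT |
			  (u32)key[4 * i + 2] << I40E_PFQF_HKEY_KEY_2_SHIFT |
			  (u32)key[4 * i + 3] << I40E_PFQF_HKEY_KEY_3_SHIFT;

		writel(val, hw_addr + I40E_PFQF_HKEY(i));
	}
}
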
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
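
PFQF_HLUT packs four 6-bit RSS lookup-table entries per register, giving 128 * 4 = 512 entries that each select one of up to 64 queues. A sketch spreading the entries round-robin over num_queues, which must be at most 64 to fit the 6-bit fields (function name illustrative):

static void i40e_fill_rss_lut(void __iomem *hw_addr, u32 num_queues)
{
	u32 i;

	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
		/* four consecutive LUT entries per register, one per byte lane */
		u32 val = ((4 * i) % num_queues) << I40E_PFQF_HLUT_LUT0_SHIFT |
			  ((4 * i + 1) % num_queues) << I40E_PFQF_HLUT_LUT1_SHIFT |
			  ((4 * i + 2) % num_queues) << I40E_PFQF_HLUT_LUT2_SHIFT |
			  ((4 * i + 3) % num_queues) << I40E_PFQF_HLUT_LUT3_SHIFT;

		writel(val, hw_addr + I40E_PFQF_HLUT(i));
	}
}
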
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
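
The eight HREGION slots occupy consecutive 4-bit lanes: one override-enable bit followed by a 3-bit region number, so slot n's fields sit at the slot-0 shifts plus 4 * n. A sketch enabling one override; the lane arithmetic is an assumption read off the shift spacing above, and the function name is illustrative:

static void i40e_set_hash_region(void __iomem *hw_addr, u32 idx,
				 unsigned int slot, u32 region)
{
	/* rebase the slot-0 masks onto this slot's 4-bit lane */
	u32 ena = I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK << (4 * slot);
	u32 mask = I40E_PFQF_HREGION_REGION_0_MASK << (4 * slot);
	u32 val = readl(hw_addr + I40E_PFQF_HREGION(idx));

	val &= ~mask;
	val |= ena;
	val |= (region << (I40E_PFQF_HREGION_REGION_0_SHIFT + 4 * slot)) & mask;
	writel(val, hw_addr + I40E_PFQF_HREGION(idx));
}
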
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
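
The two-parameter VF macros stripe the register file by word index: each _i step advances 1024 bytes and the 128 per-VF copies sit 4 bytes apart inside that stripe, so I40E_VFQF_HLUT1(i, vf) resolves to 0x00220000 + i * 1024 + vf * 4. A trivial accessor sketch, same assumptions as the earlier ones:

static u32 i40e_read_vf_hlut(void __iomem *hw_addr, unsigned int i,
			     unsigned int vf)
{
	return readl(hw_addr + I40E_VFQF_HLUT1(i, vf));
}
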
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314D80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031C000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034C000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
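The GLPRT statistics above come in low/high pairs: the low register carries bits 31:0 and the 16-bit high register bits 47:32 of a 48-bit counter. A minimal sketch of combining such a pair, using the macros defined here and a hypothetical reg_read32() MMIO helper standing in for the driver's accessor:

#include <stdint.h>

/* Hypothetical MMIO read helper; stands in for the driver's accessor. */
extern uint32_t reg_read32(uint32_t offset);

/* Combine a 48-bit statistic split across a 32-bit low register and a
 * 16-bit high register, e.g. I40E_GLPRT_GORCL(p)/I40E_GLPRT_GORCH(p). */
static uint64_t read_stat48(uint32_t lo_reg, uint32_t hi_reg)
{
	uint64_t lo = reg_read32(lo_reg);
	uint64_t hi = reg_read32(hi_reg) & 0xFFFF; /* high half is 16 bits */

	return (hi << 32) | lo;
}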
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */

+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
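The two-index GLVEBTC macros place each traffic class at an 8-byte stride and each VEB at a 64-byte stride, per the address arithmetic above. A hedged sketch that walks all traffic classes of one VEB (low halves only; reg_read32() is again a hypothetical helper):

#include <stdint.h>

extern uint32_t reg_read32(uint32_t offset); /* hypothetical MMIO helper */

/* Sum the received-bytes counters of all traffic classes on one VEB;
 * e.g. I40E_GLVEBTC_RBCL(2, 5) == 0x00364000 + 2*8 + 5*64.
 * (Low halves only here; see the 48-bit pairing sketch earlier.) */
static uint64_t veb_rx_bytes_lo(uint32_t veb)
{
	uint64_t total = 0;

	for (uint32_t tc = 0; tc <= I40E_GLVEBTC_RBCL_MAX_INDEX; tc++)
		total += reg_read32(I40E_GLVEBTC_RBCL(tc, veb));
	return total;
}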
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
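PRTTSYN_ADJ encodes the timer adjustment as a 31-bit magnitude with a separate sign bit rather than two's complement. A sketch under the assumption that a set sign bit denotes a negative adjustment (reg_write32() is a hypothetical helper):

#include <stdint.h>

extern void reg_write32(uint32_t offset, uint32_t val); /* hypothetical */

/* Write a signed timer adjustment as sign + 31-bit magnitude.
 * Assumption: a set sign bit denotes a negative adjustment. */
static void tsyn_write_adj(int64_t delta)
{
	uint64_t mag = delta < 0 ? (uint64_t)-delta : (uint64_t)delta;
	uint32_t val = (uint32_t)mag & I40E_PRTTSYN_ADJ_TSYNADJ_MASK;

	if (delta < 0)
		val |= I40E_PRTTSYN_ADJ_SIGN_MASK;
	reg_write32(I40E_PRTTSYN_ADJ, val);
}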
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
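The free-running 1588 timer is exposed as a low/high pair. A minimal sketch, assuming (as PTP drivers commonly rely on) that the low-half read yields a snapshot coherent with the subsequent high-half read:

#include <stdint.h>

extern uint32_t reg_read32(uint32_t offset); /* hypothetical MMIO helper */

/* Sample the 64-bit 1588 timer value, low half first. */
static uint64_t tsyn_read_time(void)
{
	uint64_t lo = reg_read32(I40E_PRTTSYN_TIME_L);
	uint64_t hi = reg_read32(I40E_PRTTSYN_TIME_H);

	return (hi << 32) | lo;
}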
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
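GL_MDET_TX latches one malicious-driver-detection event at a time; the VALID bit qualifies the other fields. A field-extraction sketch using the SHIFT/MASK pairs above (reporting only; a real handler would also clear the register):

#include <stdint.h>
#include <stdio.h>

/* Decode a latched TX malicious-driver-detection event from a raw
 * GL_MDET_TX register value. */
static void decode_mdet_tx(uint32_t reg)
{
	if (!(reg & I40E_GL_MDET_TX_VALID_MASK))
		return; /* no event latched */

	uint32_t func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
			I40E_GL_MDET_TX_FUNCTION_SHIFT;
	uint32_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
			 I40E_GL_MDET_TX_EVENT_SHIFT;
	uint32_t queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
			 I40E_GL_MDET_TX_QUEUE_SHIFT;

	printf("MDD TX event 0x%x on function %u queue %u\n",
	       (unsigned)event, (unsigned)func, (unsigned)queue);
}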
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
+#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
+#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
+#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
+#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
+#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
+#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
+#define I40E_PFPM_PROXYFC 0x00245A80
+#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
+#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
+#define I40E_PFPM_PROXYFC_EX_SHIFT 1
+#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
+#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_SHIFT 9
+#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
+#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
+#define I40E_PFPM_PROXYS 0x00245B80
+#define I40E_PFPM_PROXYS_EX_SHIFT 1
+#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_SHIFT 4
+#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_NS_SHIFT 9
+#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
+#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_MLD_SHIFT 12
+#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
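PFPM_WUFC selects which events may wake the PF. A read-modify-write sketch that arms magic-packet and link-change wake-up while preserving the flexible-filter bits (reg_read32()/reg_write32() are hypothetical helpers):

#include <stdint.h>

extern uint32_t reg_read32(uint32_t offset);            /* hypothetical */
extern void reg_write32(uint32_t offset, uint32_t val); /* hypothetical */

/* Arm magic-packet and link-change wake-up; read-modify-write so the
 * flexible-filter bits (FLX0..FLX7 and their ACT variants) survive. */
static void pf_arm_wake(void)
{
	uint32_t wufc = reg_read32(I40E_PFPM_WUFC);

	wufc |= I40E_PFPM_WUFC_MAG_MASK | I40E_PFPM_WUFC_LNKC_MASK;
	reg_write32(I40E_PFPM_WUFC, wufc);
}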
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
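The VF admin queues are enabled by writing the descriptor count to the LEN register's low bits together with the enable flag in bit 31. A sketch for the receive side, assuming base address and tail were programmed beforehand (reg_write32() is a hypothetical helper):

#include <stdint.h>

extern void reg_write32(uint32_t offset, uint32_t val); /* hypothetical */

/* Program and enable the VF admin receive queue: descriptor count in
 * the low bits, enable flag in bit 31. */
static void vf_arq_enable(uint16_t num_descs)
{
	uint32_t val = ((uint32_t)num_descs << I40E_VF_ARQLEN1_ARQLEN_SHIFT) &
		       I40E_VF_ARQLEN1_ARQLEN_MASK;

	val |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
	reg_write32(I40E_VF_ARQLEN1, val);
}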
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
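VFINT_DYN_CTLN1 is the per-vector doorbell a VF writes after servicing an interrupt. A sketch that re-enables the vector and clears its pending bit; the ITR index value 0x3 is assumed here to mean "no ITR update", an Intel convention rather than anything stated above:

#include <stdint.h>

extern void reg_write32(uint32_t offset, uint32_t val); /* hypothetical */

/* Re-enable VF MSI-X vector `vector` and clear its pending-bit-array
 * entry.  ITR index 0x3 assumed to mean "no ITR update". */
static void vf_irq_reenable(uint32_t vector)
{
	uint32_t val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
		       (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);

	reg_write32(I40E_VFINT_DYN_CTLN1(vector), val);
}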
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
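+
+/* Each SHIFT/MASK pair above is used together to build or extract a
+ * register field, e.g. (illustrative snippet, not part of this patch):
+ *	u32 region = (val & I40E_VFQF_HREGION_REGION_0_MASK) >>
+ *		     I40E_VFQF_HREGION_REGION_0_SHIFT;
+ */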
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 00000000000..5e5bcddac57
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 00000000000..49d2cfa9b0c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,1817 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
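+/* Pack the command, header offsets, buffer size and L2 tag into the
+ * single 64-bit cmd_type_offset_bsz quad word of a Tx data descriptor;
+ * the DTYPE field marks it as a data descriptor.
+ */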
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: Packet data that will be filter parameters
+ * @pf: The pf pointer
+ * @add: True for add/update, False for remove
+ **/
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ struct i40e_ring *tx_ring;
+ struct i40e_vsi *vsi;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd = 0;
+ u16 i;
+
+ /* find existing FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi)
+ return -ENOENT;
+
+ tx_ring = &vsi->tx_rings[0];
+ dev = tx_ring->dev;
+
+ dma = dma_map_single(dev, fdir_data->raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_fail;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
+ << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
+ << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+ & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
+ << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+ & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ /* Use LAN VSI Id if not programmed by user */
+ if (fdir_data->dest_vsi == 0)
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ else
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((fdir_data->dest_vsi
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+ & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdir_desc->dtype_cmd_cntindex =
+ cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
+ << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+ & I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+ & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ if (fdir_data->cnt_index != 0) {
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32((fdir_data->cnt_index
+ << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ }
+
+ fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+
+ /* Now program a dummy descriptor */
+ tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+ return 0;
+
+dma_fail:
+ return -1;
+}
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @qw: the descriptor data
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify whether the FD programming or invalidation
+ * requested by SW succeeded in HW, and to take action accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+{
+ struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ u32 error;
+
+ error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+ /* for now just print the Status */
+ dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
+ prog_id, error);
+}
+
+/**
+ * i40e_unmap_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->dma = 0;
+ tx_buffer->time_stamp = 0;
+}
+
+/**
+ * i40e_clean_tx_ring - Free all Tx ring buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ struct i40e_tx_buffer *tx_buffer;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer->skb)
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40e_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
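+	/* e.g. (hypothetical numbers) with count = 512, next_to_clean = 500
+	 * and next_to_use = 10, ntu becomes 522 and 22 descriptors are still
+	 * pending
+	 */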
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
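+	/* each pass of this loop retires one complete frame: the EOP
+	 * descriptor and every descriptor leading up to it, so budget is
+	 * charged per packet rather than per descriptor
+	 */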
+ for (; budget; budget--) {
+ struct i40e_tx_desc *eop_desc;
+
+ eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+ tx_buf->next_to_watch = NULL;
+ tx_buf->time_stamp = 0;
+
+ /* set memory barrier before eop_desc is verified */
+ rmb();
+
+ do {
+ i40e_unmap_tx_resource(tx_ring, tx_buf);
+
+ /* clear dtype status */
+ tx_desc->cmd_type_offset_bsz &=
+ ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+ }
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+ } while (eop_desc);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
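+	/* restart the queue only once at least TX_WAKE_THRESHOLD descriptors
+	 * are free, so that it is not stopped again immediately after waking
+	 */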
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttlerate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+	case I40E_BULK_LATENCY:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
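+		/* e.g. (illustrative units) with rc->itr = 200 and a target
+		 * of 100 this yields (10*100*200)/(9*100+200) ~= 181, so the
+		 * rate converges over several interrupts instead of jumping
+		 * straight to the target
+		 */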
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @rx_desc: the rx descriptor written back by HW
+ *
+ * Flow director handles FD_FILTER_STATUS to check whether its filter
+ * programming or invalidation succeeded and takes action accordingly.
+ * FCoE should handle its context/filter programming/invalidation status
+ * and take actions.
+ *
+ **/
+static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ u64 qw;
+ u8 id;
+
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+ i40e_fd_handle_status(rx_ring, qw, id);
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40e_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value and notify hardware
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
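+				/* the XOR flips page_offset between 0 and
+				 * PAGE_SIZE/2, so the two halves of the page
+				 * are handed out alternately
+				 */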
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* IP or L4 checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH) {
+ if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ }
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
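+	/* keep consuming descriptors while the hardware has set the DD
+	 * (descriptor done) bit, handing completed frames up the stack until
+	 * the budget runs out
+	 */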
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ goto next_desc;
+ }
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
+ >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
+ >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+		 * If this is an skb from a previous receive, dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ rx_ring->rx_stats.packets += total_rx_packets;
+ rx_ring->rx_stats.bytes += total_rx_bytes;
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ bool clean_complete = true;
+ int budget_per_ring;
+ int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ * Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
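+	/* e.g. a 64-packet budget across 4 ring pairs lets each Rx ring
+	 * clean at most 16 packets in this poll
+	 */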
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+ for (i = 0; i < q_vector->num_ringpairs; i++) {
+ clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
+ vsi->work_limit);
+ clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
+ budget_per_ring);
+ }
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state)) {
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ } else {
+ struct i40e_hw *hw = &vsi->back->hw;
+ /* We re-enable the queue 0 cause, but
+ * don't worry about dynamic_enable
+ * because we left it on for the other
+ * possible interrupts during napi
+ */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring: ring to add programming descriptor to
+ * @skb: send buffer
+ * @flags: send flags
+ * @protocol: wire protocol
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 flags, __be16 protocol)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ unsigned int hlen;
+ u32 flex_ptype, dtype_cmd;
+
+ /* make sure ATR is enabled */
+ if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ return;
+
+ /* if sampling is disabled do nothing */
+ if (!tx_ring->atr_sample_rate)
+ return;
+
+ tx_ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if (protocol == htons(ETH_P_IP)) {
+ if (hdr.ipv4->protocol != IPPROTO_TCP)
+ return;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ return;
+
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* sample on all syn/fin packets or once every atr sample rate */
+ if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ return;
+
+ tx_ring->atr_count = 0;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
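+	/* a FIN ends the flow, so remove the filter rather than add/update it */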
+ dtype_cmd |= th->fin ?
+ (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+ (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW (VLAN, DCB, etc.).
+ *
+ * Returns a negative error code if the frame should be dropped, otherwise
+ * returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return -ENOMEM;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ I40E_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ }
+ }
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tx_csum - is checksum offload requested
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ *
+ * Returns true if checksum offload is requested
+ **/
+static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ !(tx_flags & I40E_TX_FLAGS_TXSW)) {
+ if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
+ return false;
+ }
+
+ return skb->ip_summed == CHECKSUM_PARTIAL;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
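+	/* zero the length fields and seed the L4 checksum with the
+	 * pseudo-header sum, so the hardware only has to add in the payload
+	 * while segmenting
+	 */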
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
+ | ((u64)cd_tso_len
+ << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+ | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
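+	/* for encapsulated frames the outer IP header is described by the
+	 * context descriptor (cd_tunneling) and the inner headers by
+	 * td_cmd/td_offset below; otherwise there is only one set of headers
+	 */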
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+			/* IPv6 has no header checksum field to zero */
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		}
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u32 buf_offset = 0;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ for (;;) {
+ while (size > I40E_MAX_DATA_PER_TXD) {
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ buf_offset += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_bi = &tx_ring->tx_bi[i];
+ tx_bi->length = buf_offset + size;
+ tx_bi->tx_flags = tx_flags;
+ tx_bi->dma = dma;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ if (likely(!data_len))
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ buf_offset = 0;
+ tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ frag++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ tx_bi->bytecount = paylen + (gso_segs * hdr_len);
+ tx_bi->gso_segs = gso_segs;
+ tx_bi->skb = skb;
+
+ /* set the timestamp and next to watch values */
+ first->time_stamp = jiffies;
+ first->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(i, tx_ring->tail);
+ return;
+
+dma_error:
+ dev_info(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the number of descriptors we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb, or 0 when
+ * there are not enough descriptors available in this ring (we always need
+ * at least one descriptor).
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
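+
+/* Worked example of the accounting above, assuming 4K pages and a
+ * hypothetical skb with a 256 byte linear area and two page-sized
+ * frags: TXD_USE_COUNT(256) = 1 descriptor for the head plus one
+ * descriptor per frag (a 4K frag fits in one I40E_MAX_DATA_PER_TXD
+ * chunk), so count = 3. i40e_maybe_stop_tx() is then asked for
+ * count + 3 = 6 free descriptors: three for data, one for a possible
+ * context descriptor and two of gap to keep tail from touching head.
+ */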
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso;
+
+ if (!i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == __constant_htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ /* always enable offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ if (tx_flags & I40E_TX_FLAGS_CSUM)
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ /* Add Flow Director ATR if it's enabled.
+ *
+ * NOTE: this must always be directly before the data descriptor.
+ */
+ i40e_atr(tx_ring, skb, tx_flags, protocol);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 00000000000..b1d7722d98a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,259 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x07FF
+#define I40E_MIN_ITR 0x0001
+#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
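+
+/* Worked example for the conversions above: I40E_ITR_8K is 0x003E (62).
+ * Per the I40E_MIN_INT_RATE/I40E_MAX_INT_RATE comments above, that is
+ * roughly 1000000 / (62 * 2) ~= 8000 interrupts per second, hence the
+ * name. A setting of (I40E_ITR_8K | I40E_ITR_DYNAMIC) makes
+ * ITR_IS_DYNAMIC() true, and ITR_TO_REG() strips the flag and converts
+ * the microsecond value to the 2 usec register resolution before it is
+ * written to hardware.
+ */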
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+#define I40E_ITR_NONE 3
+#define I40E_RX_ITR 0
+#define I40E_TX_ITR 1
+#define I40E_PE_ITR 2
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
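+
+/* Minimal usage sketch for the ring walk helpers above, with
+ * hypothetical locals from an Rx clean loop:
+ *
+ * union i40e_rx_desc *rx_desc;
+ * u16 i = rx_ring->next_to_clean;
+ *
+ * I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, rx_desc);
+ *
+ * advances i with wraparound, points rx_desc at the next descriptor
+ * and prefetches it before its fields are read.
+ */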
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
+#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
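+
+/* A minimal sketch of how the VLAN fields above combine; vlan_tag is a
+ * hypothetical u16 and the packing mirrors the unpacking done in
+ * i40e_tx_map():
+ *
+ * tx_flags |= (u32)vlan_tag << I40E_TX_FLAGS_VLAN_SHIFT;
+ * tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ *
+ * The low 16 bits of tx_flags carry boolean flags, the high 16 bits
+ * carry the 802.1Q tag, and I40E_TX_FLAGS_VLAN_PRIO_MASK selects the
+ * tag's three priority bits.
+ */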
+
+struct i40e_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u32 tx_flags;
+ struct i40e_tx_desc *next_to_watch;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+#define I40E_MAX_RINGPAIR_PER_VECTOR 8
+ /* array of pointers to rings */
+ struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 00000000000..f3f22b20f02
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1154 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_SFP_XL710_DEVICE_ID 0x1572
+#define I40E_SFP_X710_DEVICE_ID 0x1573
+#define I40E_QEMU_DEVICE_ID 0x1574
+#define I40E_KX_A_DEVICE_ID 0x157F
+#define I40E_KX_B_DEVICE_ID 0x1580
+#define I40E_KX_C_DEVICE_ID 0x1581
+#define I40E_KX_D_DEVICE_ID 0x1582
+#define I40E_QSFP_A_DEVICE_ID 0x1583
+#define I40E_QSFP_B_DEVICE_ID 0x1584
+#define I40E_QSFP_C_DEVICE_ID 0x1585
+#define I40E_VF_DEVICE_ID 0x154C
+#define I40E_VF_HV_DEVICE_ID 0x1571
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. This is a little-endian specific check. */
+#define I40E_IS_MULTICAST(address) \
+ (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
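+
+/* Worked example for the macro above: on a ring with count = 512,
+ * next_to_use = 500 and next_to_clean = 10, the clean index has
+ * wrapped, so I40E_DESC_UNUSED() = 512 + 10 - 500 - 1 = 21 free
+ * descriptors. The "- 1" keeps one slot unused so that a full ring
+ * is distinguishable from an empty one.
+ */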
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
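+
+/* Minimal decode sketch for the qword1 fields above; the helper name is
+ * hypothetical, the shifts and masks are the ones defined in this file.
+ * The status, error and ptype fields unpack the same way with their
+ * respective masks.
+ */
+static inline u32 i40e_example_rx_pkt_len(u64 qword)
+{
+ /* packet buffer length occupies bits 38..51 of qword1 */
+ return (u32)((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
+}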
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
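+
+/* A minimal sketch of how the qword1 fields above combine into
+ * cmd_type_offset_bsz; build_ctob() in i40e_txrx.c is assumed to do
+ * the equivalent packing, and the helper name here is hypothetical.
+ */
+static inline __le64 i40e_example_build_ctob(u32 td_cmd, u32 td_offset,
+ unsigned int size, u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}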
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Value 0-25 are reserved for future use */
+ I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
+ I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
+ I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Value 37 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
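+
+/* Minimal verification sketch for the rule above; the helper name is
+ * hypothetical and sr[] is assumed to hold the Shadow RAM words that
+ * participate in the checksum, including the checksum word itself.
+ */
+static inline bool i40e_example_sr_csum_ok(const u16 *sr, u32 nwords)
+{
+ u16 sum = 0;
+ u32 i;
+
+ /* 16-bit wraparound sum of every word, checksum word included */
+ for (i = 0; i < nwords; i++)
+ sum += sr[i];
+
+ return sum == I40E_SR_SW_CHECKSUM_BASE;
+}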
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xF
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 00000000000..cc6654f1dac
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
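+
+/* A rough sketch of that initialization order, with a hypothetical
+ * vf_send() helper standing in for the real admin queue plumbing:
+ *
+ * vf_send(I40E_VIRTCHNL_OP_VERSION, &my_version);
+ * vf_send(I40E_VIRTCHNL_OP_RESET_VF, NULL);  (no reply; poll VFGEN_RSTAT)
+ * vf_send(I40E_VIRTCHNL_OP_GET_VF_RESOURCES, NULL);
+ * vf_send(I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, &qconfig);
+ * vf_send(I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, &irqmap);
+ * vf_send(I40E_VIRTCHNL_OP_ENABLE_QUEUES, &qselect);
+ */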
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF using the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
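+
+/* Minimal negotiation sketch following the rules above, with pf_ver
+ * holding the PF's reply (hypothetical local):
+ *
+ * if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *         the VF cannot operate and must give up;
+ * else if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *         log a warning and continue;
+ */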
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
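+
+/* vsi_res is declared with one element, but the PF's reply carries
+ * num_vsis entries back to back; a sketch of walking them (setup_vsi()
+ * is a hypothetical VF-side helper):
+ *
+ *   for (i = 0; i < res->num_vsis; i++)
+ *           setup_vsi(&res->vsi_res[i]);
+ */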
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
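+
+/* qpair is declared with one element, but real messages carry
+ * num_queue_pairs entries; i40e_vc_validate_vf_msg() in the PF driver
+ * expects the buffer sized as in this sketch:
+ *
+ *   len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ *         num_queue_pairs *
+ *         sizeof(struct i40e_virtchnl_queue_pair_info);
+ */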
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
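+
+/* Example with illustrative values: enabling rx queues 0-1 and tx
+ * queue 0 of VSI 3 could be encoded as
+ *
+ *   struct i40e_virtchnl_queue_select vqs = {
+ *           .vsi_id    = 3,
+ *           .rx_queues = BIT(0) | BIT(1),
+ *           .tx_queues = BIT(0),
+ *   };
+ */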
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
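+
+/* Example with illustrative values: a VF requesting unicast plus
+ * multicast promiscuous mode on VSI 3 would send
+ *
+ *   struct i40e_virtchnl_promisc_info info = {
+ *           .vsi_id = 3,
+ *           .flags  = I40E_FLAG_VF_UNICAST_PROMISC |
+ *                     I40E_FLAG_VF_MULTICAST_PROMISC,
+ *   };
+ */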
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3 (I40E_VFR_UNKNOWN).
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
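+
+/* A decode sketch per the comment above; the VFR_STATE mask name is an
+ * assumption taken from the register definitions:
+ *
+ *   reg = rd32(hw, I40E_VFGEN_RSTAT);
+ *   state = reg & I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *   (0xDEADBEEF masks to 3, i.e. I40E_VFR_UNKNOWN)
+ */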
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 00000000000..8967e58e240
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vf relative vsi id
+ *
+ * check for the valid vsi id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for the valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+ u8 qid)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return qid < pf->vsi[vsi_id]->num_queue_pairs;
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the vf info
+ * @vector_id: vf relative vector id
+ *
+ * check for the valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return pf relative queue id
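+ * (e.g. with a contiguous map whose first entry is pf queue 64, vsi
+ * queue 3 resolves to pf queue 67; values illustrative only)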
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+ u8 vsi_queue_id)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+ u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+ if (le16_to_cpu(vsi->info.mapping_flags) &
+ I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+ pf_queue_id =
+ le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+ else
+ pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+ vsi_queue_id;
+
+ return pf_queue_id;
+}
+
+/**
+ * i40e_ctrl_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/fast disable the queue, or check that the op completed
+ **/
+static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
+
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/fast disable the queue, or check that the op completed
+ **/
+static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
+
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+ struct i40e_virtchnl_vector_map *vecmap)
+{
+ unsigned long linklistmap = 0, tempmap;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vsi_queue_id, pf_queue_id;
+ enum i40e_queue_type qtype;
+ u16 next_q, vector_id;
+ u32 reg, reg_idx;
+ u16 itr_idx = 0;
+
+ vector_id = vecmap->vector_id;
+ /* setup the head */
+ if (0 == vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ * vf->vf_id) + (vector_id - 1));
+
+ if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+ /* Special case - No queues mapped on this vector */
+ wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto irq_list_done;
+ }
+ tempmap = vecmap->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
+ vsi_queue_id =
+ find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
+ }
+
+ tempmap = vecmap->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ + 1));
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
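+ /* linklistmap now interleaves rx/tx per queue: with
+ * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, bit (2 * q) is set when rx
+ * queue q is mapped and bit (2 * q + 1) when tx queue q is; e.g.
+ * rxq_map = 0x3 with txq_map = 0x1 yields linklistmap 0x7
+ * (illustrative values).
+ */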
+ next_q = find_first_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+ wr32(hw, reg_idx, reg);
+
+ while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+ itr_idx = vecmap->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+ itr_idx = vecmap->txitr_idx;
+ break;
+ default:
+ break;
+ }
+
+ next_q = find_next_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES),
+ next_q + 1);
+ if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+ vsi_queue_id);
+ } else {
+ pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ qtype = 0;
+ }
+
+ /* format for the RQCTL & TQCTL regs is the same */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ wr32(hw, reg_idx, reg);
+ }
+
+irq_list_done:
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ u16 pf_queue_id;
+ u32 qtx_ctl;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+ /* only set the required fields */
+ tx_ctx.base = info->dma_ring_addr / 128;
+ tx_ctx.qlen = info->ring_len;
+ tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Tx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Tx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* associate this queue with the PCI VF function */
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+ << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+ & I40E_QTX_CTL_VFVM_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+ i40e_flush(hw);
+
+error_context:
+ return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ u16 pf_queue_id;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+ /* only set the required fields */
+ rx_ctx.base = info->dma_ring_addr / 128;
+ rx_ctx.qlen = info->ring_len;
+
+ if (info->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ /* header length validation */
+ if (info->hdr_size > ((2 * 1024) - 64)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ /* set splitalways mode 10b */
+ rx_ctx.dtype = 0x2;
+ }
+
+ /* databuffer length validation */
+ if (info->databuffer_size > ((16 * 1024) - 128)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ /* max pkt. length validation */
+ if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.rxmax = info->max_pkt_size;
+
+ /* enable 32bytes desc always */
+ rx_ctx.dsize = 1;
+
+ /* default values */
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Rx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Rx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the vf info
+ * @type: type of VSI to allocate
+ *
+ * alloc vf vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+{
+ struct i40e_mac_filter *f = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ int ret = 0;
+
+ vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev,
+ "add vsi failed for vf %d, aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto error_alloc_vsi_res;
+ }
+ if (type == I40E_VSI_SRIOV) {
+ vf->lan_vsi_index = vsi->idx;
+ vf->lan_vsi_id = vsi->id;
+ dev_info(&pf->pdev->dev,
+ "LAN VSI index %d, VSI id %d\n",
+ vsi->idx, vsi->id);
+ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+ 0, true, false);
+ /* only the SRIOV path adds the default filter, so check for
+ * failure here instead of unconditionally for every VSI type
+ */
+ if (!f) {
+ dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
+ ret = -ENOMEM;
+ goto error_alloc_vsi_res;
+ }
+ }
+
+ /* program mac filter */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ goto error_alloc_vsi_res;
+ }
+
+ /* accept bcast pkts. by default */
+ ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
+ vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
+ ret = -EINVAL;
+ }
+
+error_alloc_vsi_res:
+ return ret;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+int i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, msix_vf;
+ bool rsd = false;
+ u16 pf_queue_id;
+ int i, j;
+
+ /* warn the VF */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
+
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* The PF triggers VFR only when the VF requests it; in
+ * case of VFLR, the HW triggers VFR itself
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 4; i++) {
+ /* vf reset requires driver to first reset the
+ * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
+ vf->vf_id);
+
+ /* fast disable qps */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ }
+
+ /* Queue enable/disable requires driver to
+ * first reset the vf & then poll the status register
+ * to make sure that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ }
+
+ /* clear the irq settings */
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < msix_vf; i++) {
+ /* format is the same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is the same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* set the defaults for the rqctl & tqctl registers */
+ reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
+ wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
+ }
+
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ i40e_flush(hw);
+
+ return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * enable vf mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_queue_pairs = 0;
+ int j;
+
+ /* Tell the hardware we're using noncontiguous mapping. HW requires
+ * that VF queues be mapped using this method, even when they are
+ * contiguous in real life
+ */
+ wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* enable VF vplan_qtable mappings */
+ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+ /* map PF queues to VF queues */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+ total_queue_pairs++;
+ }
+
+ /* map PF queues to VSI */
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+ reg = 0x07FF07FF; /* unused */
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+ }
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * disable vf mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* disable qp mappings */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+ for (i = 0; i < I40E_MAX_VSI_QP; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+ I40E_QUEUE_END_OF_LIST);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the vf info
+ *
+ * free vf resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_index) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
+ vf->lan_vsi_index = 0;
+ vf->lan_vsi_id = 0;
+ }
+ /* reset some of the state variables keeping
+ * track of the resources
+ */
+ vf->num_queue_pairs = 0;
+ vf->vf_states = 0;
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the vf info
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int total_queue_pairs = 0;
+ int ret;
+
+ /* allocate hw vsi context & associated resources */
+ ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+ if (ret)
+ goto error_alloc;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* store the total qps number for the runtime
+ * vf req validation
+ */
+ vf->num_queue_pairs = total_queue_pairs;
+
+ /* vf is now completely initialized */
+ set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+ if (ret)
+ i40e_free_vf_res(vf);
+
+ return ret;
+}
+
+/**
+ * i40e_vfs_are_assigned
+ * @pf: pointer to the pf structure
+ *
+ * Determine if any VFs are assigned to VMs
+ **/
+static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
+{
+ struct pci_dev *pdev = pf->pdev;
+ struct pci_dev *vfdev;
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ I40E_VF_DEVICE_ID,
+ vfdev);
+ }
+
+ return false;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the pf structure
+ *
+ * free vf resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!pf->vf)
+ return;
+
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+
+ /* free up vf resources */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_free_vf_res(&pf->vf[i]);
+ /* disable qp mappings */
+ i40e_disable_vf_mappings(&pf->vf[i]);
+ }
+
+ kfree(pf->vf);
+ pf->vf = NULL;
+ pf->num_alloc_vfs = 0;
+
+ if (!i40e_vfs_are_assigned(pf))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev,
+ "unable to disable SR-IOV because VFs are assigned.\n");
+
+ /* Re-enable interrupt 0. */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
+ i40e_flush(hw);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the pf structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+ struct i40e_vf *vfs;
+ int i, ret = 0;
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "pci_enable_sriov failed with error %d!\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
+
+ /* allocate memory */
+ vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+
+ ret = i40e_alloc_vf_res(&vfs[i]);
+ i40e_reset_vf(&vfs[i], true);
+ if (ret)
+ break;
+
+ /* enable vf vplan_qtable mappings */
+ i40e_enable_vf_mappings(&vfs[i]);
+ }
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+ if (ret)
+ i40e_free_vfs(pf);
+err_iov:
+ return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int pre_existing_vfs = pci_num_vf(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ i40e_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (num_vfs > pf->num_req_vfs) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ err = i40e_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+ goto err_out;
+ }
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return i40e_pci_sriov_enable(pdev, num_vfs);
+
+ i40e_free_vfs(pf);
+ return 0;
+}
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to vf
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_invalid_msgs++;
+ dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
+ v_opcode, v_retval);
+ if (vf->num_invalid_msgs >
+ I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ } else {
+ vf->num_valid_msgs++;
+ }
+
+ aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the vf info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to vf
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ enum i40e_virtchnl_ops opcode,
+ i40e_status retval)
+{
+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_version_info info = {
+ I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+ };
+
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (u8 *)&info,
+ sizeof(struct
+ i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_vf_resource *vfres = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+ int i = 0, len = 0;
+ int num_vsis = 1;
+ int ret;
+
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ len = (sizeof(struct i40e_virtchnl_vf_resource) +
+ sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!vsi->info.pvid)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->num_vsis = num_vsis;
+ vfres->num_queue_pairs = vf->num_queue_pairs;
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ if (vf->lan_vsi_index) {
+ vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+ vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[i].num_queue_pairs =
+ pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ memcpy(vfres->vsi_res[i].default_mac_addr,
+ vf->default_lan_addr.addr, ETH_ALEN);
+ i++;
+ }
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+ /* send the response back to the vf */
+ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ aq_ret, (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to reset itself; unlike other virtchnl messages,
+ * the pf driver doesn't send a response back to the vf
+ **/
+static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return -ENOENT;
+
+ return i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the promiscuous mode of
+ * vf vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
+ u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_promisc_info *info =
+ (struct i40e_virtchnl_promisc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool allmulti = false;
+ bool promisc = false;
+ i40e_status aq_ret;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ promisc = true;
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
+ promisc, NULL);
+ if (aq_ret)
+ goto error_param;
+
+ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+ allmulti, NULL);
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *qci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *qpi;
+ u16 vsi_id, vsi_queue_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi_id = qci->vsi_id;
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ vsi_queue_id = qpi->txq.queue_id;
+ if ((qpi->txq.vsi_id != vsi_id) ||
+ (qpi->rxq.vsi_id != vsi_id) ||
+ (qpi->rxq.queue_id != vsi_queue_id) ||
+ !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->rxq) ||
+ i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->txq)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_irq_map_info *irqmap_info =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ u16 vsi_id, vsi_queue_id, vector_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for invalid queue indexes */
+ tempmap = map->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ tempmap = map->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ i40e_config_irq_link_list(vf, vsi_id, map);
+ }
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_eth_stats stats;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vqs->vsi_id];
+ if (!vsi) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_update_eth_stats(vsi);
+ memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+
+error_param:
+ /* send the response back to the vf */
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
+ al->list[i].addr);
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* add new addresses to the list */
+ for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
+
+ f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+ if (!f) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
+ true, false);
+ else
+ f = i40e_add_filter(vsi, al->list[i].addr, -1,
+ true, false);
+ }
+
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF MAC filter\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* delete addresses from the list */
+ for (i = 0; i < al->num_elements; i++)
+ i40e_del_filter(vsi, al->list[i].addr,
+ I40E_VLAN_ANY, true, false);
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_vlan_stripping_enable(vsi);
+ for (i = 0; i < vfl->num_elements; i++) {
+ /* add new VLAN filter */
+ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to delete VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_fcoe_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf for the fcoe msgs
+ **/
+static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ aq_ret = I40E_ERR_NOT_IMPLEMENTED;
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg
+ **/
+static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len;
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct i40e_virtchnl_version_info);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_txq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vsi_queue_config_info *vqc =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ i40e_virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_irq_map_info *vimi =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_ether_addr_list *veal =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct i40e_virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ /* These are always errors coming from the VF. */
+ case I40E_VIRTCHNL_OP_EVENT:
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ return -EPERM;
+ }
+	/* a few more checks */
+ if ((valid_len != msglen) || (err_msg_format)) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+}
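The variable-length opcodes above compute an expected length as the fixed header plus a per-element payload, and reject a zero element count as malformed. A standalone sketch of that arithmetic, using illustrative stand-in structs rather than the driver's real virtchnl layouts:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the virtchnl structs; real layouts differ. */
struct vsi_queue_config_info { uint16_t vsi_id; uint16_t num_queue_pairs; };
struct queue_pair_info { uint8_t txq[24]; uint8_t rxq[40]; };

int main(void)
{
    struct vsi_queue_config_info vqc = { .vsi_id = 1, .num_queue_pairs = 4 };
    size_t valid_len = sizeof(vqc) +
                       vqc.num_queue_pairs * sizeof(struct queue_pair_info);

    /* a message shorter or longer than this exact size is rejected */
    printf("expected msglen: %zu\n", valid_len);
    return 0;
}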
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the pf structure
+ * @vf_id: source vf id
+ * @v_opcode: virtchnl operation code
+ * @v_retval: message return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process a request from a vf
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_vf *vf = &(pf->vf[vf_id]);
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ pf->vf_aq_requests++;
+ /* perform basic checks on the msg */
+ ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ return ret;
+ }
+ wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ret = i40e_vc_get_version_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ret = i40e_vc_get_vf_resources_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ret = i40e_vc_reset_vf_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_FCOE:
+ ret = i40e_vc_fcoe_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev,
+ "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+ I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the pf structure
+ *
+ * called from the vflr irq handler to
+ * free up vf resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+ u32 reg, reg_idx, bit_idx, vf_id;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ return 0;
+
+ clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ vf = &pf->vf[vf_id];
+ reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+ if (reg & (1 << bit_idx)) {
+ /* clear the bit in GLGEN_VFLRSTAT */
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+
+ if (i40e_reset_vf(vf, true))
+ dev_err(&pf->pdev->dev,
+ "Unable to reset the VF %d\n", vf_id);
+ /* free up vf resources to destroy vsi state */
+ i40e_free_vf_res(vf);
+
+ /* allocate new vf resources with the default state */
+ if (i40e_alloc_vf_res(vf))
+ dev_err(&pf->pdev->dev,
+ "Unable to allocate VF resources %d\n",
+ vf_id);
+
+ i40e_enable_vf_mappings(vf);
+ }
+ }
+
+ /* re-enable vflr interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
+ return 0;
+}
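Each GLGEN_VFLRSTAT register carries 32 VF bits, so the absolute VF number (vf_base_id + vf_id) splits into a register index and a bit index. A quick standalone check of that arithmetic; the vf_base_id value here is hypothetical:

#include <stdio.h>

int main(void)
{
    unsigned int vf_base_id = 64, vf_id = 5; /* hypothetical values */
    unsigned int abs_vf = vf_base_id + vf_id;

    /* VF 69 lands in register 2, bit 5 */
    printf("reg_idx=%u bit_idx=%u\n", abs_vf / 32, abs_vf % 32);
    return 0;
}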
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the pf structure
+ * @opcode: operation code
+ * @retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ vf++;
+ }
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the pf structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
+
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the pf structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the vf structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (u8 *)&pfe,
+ sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @mac: mac address
+ *
+ * program vf mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_mac_filter *f;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev,
+ "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF ethernet address\n");
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ /* delete the temporary mac address */
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+
+ /* add the new mac address */
+ f = i40e_add_filter(vsi, mac, 0, true, false);
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF ucast filter\n");
+ ret = -ENOMEM;
+ goto error_param;
+ }
+
+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
+ /* program mac filter */
+ if (i40e_sync_vsi_filters(vsi)) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ ret = -EIO;
+ goto error_param;
+ }
+ memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ ret = 0;
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @vlan_id: vlan id to be set on the vf
+ * @qos: priority setting
+ *
+ * program vf vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if (vsi->info.pvid) {
+ /* kill old VLAN */
+ ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "remove VLAN failed, ret=%d, aq_err=%d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ }
+ if (vlan_id || qos)
+ ret = i40e_vsi_add_pvid(vsi,
+ vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter */
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ goto error_pvid;
+ }
+ }
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+ goto error_pvid;
+ }
+ ret = 0;
+
+error_pvid:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @tx_rate: tx rate
+ *
+ * configure vf tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @ivi: vf configuration structure
+ *
+ * return vf configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ /* first vsi is always the LAN vsi */
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ ivi->vf = vf_id;
+
+ /* first entry of the list is the default ethernet address */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
+ break;
+ }
+
+ ivi->tx_rate = 0;
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+ I40E_VLAN_PRIORITY_SHIFT;
+ ret = 0;
+
+error_param:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 00000000000..360382cf304
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
+
+#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_MASK 0xFFF
+#define I40E_PRIORITY_MASK 0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+ I40E_QUEUE_CTRL_UNKNOWN = 0,
+ I40E_QUEUE_CTRL_ENABLE,
+ I40E_QUEUE_CTRL_ENABLECHECK,
+ I40E_QUEUE_CTRL_DISABLE,
+ I40E_QUEUE_CTRL_DISABLECHECK,
+ I40E_QUEUE_CTRL_FASTDISABLE,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+ I40E_VF_STAT_INIT = 0,
+ I40E_VF_STAT_ACTIVE,
+ I40E_VF_STAT_FCOEENA,
+ I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+ I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+ I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+ struct i40e_pf *pf;
+
+ /* vf id in the pf space */
+ u16 vf_id;
+ /* all vf vsis connect to the same parent */
+ enum i40e_switch_element_types parent_type;
+
+ /* vf Port Extender (PE) stag if used */
+ u16 stag;
+
+ struct i40e_virtchnl_ether_addr default_lan_addr;
+ struct i40e_virtchnl_ether_addr default_fcoe_addr;
+
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u8 lan_vsi_index; /* index into PF struct */
+ u8 lan_vsi_id; /* ID as used by firmware */
+
+ u8 num_queue_pairs; /* num of qps assigned to vf vsis */
+ u64 num_mdd_events; /* num of mdd events detected */
+ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
+ u64 num_valid_msgs; /* num of valid msgs detected */
+
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_states; /* vf's runtime states */
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* vf configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi);
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 270e65f2110..a36fa80968e 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -996,14 +996,14 @@ static int korina_open(struct net_device *dev)
* that handles the Done Finished
* Ovr and Und Events */
ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Rx", dev);
+ 0, "Korina ethernet Rx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
dev->name, lp->rx_irq);
goto err_release;
}
ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Tx", dev);
+ 0, "Korina ethernet Tx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
dev->name, lp->tx_irq);
@@ -1012,7 +1012,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for overrun error. */
ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
- IRQF_DISABLED, "Ethernet Overflow", dev);
+ 0, "Ethernet Overflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
dev->name, lp->ovr_irq);
@@ -1021,7 +1021,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for underflow error. */
ret = request_irq(lp->und_irq, korina_und_interrupt,
- IRQF_DISABLED, "Ethernet Underflow", dev);
+ 0, "Ethernet Underflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
dev->name, lp->und_irq);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 88349b8fa39..81bf83604c4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -430,7 +430,7 @@ struct qlcnic_hardware_context {
u8 diag_test;
u8 num_msix;
u8 nic_mode;
- char diag_cnt;
+ int diag_cnt;
u16 max_uc_count;
u16 port_type;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 949076f4e6a..13e6fff8ca2 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1734,7 +1734,8 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
- unsigned int isum_seed, tsum_seed, id, seq;
+ unsigned int isum_seed, tsum_seed, seq;
+ unsigned int uninitialized_var(id);
int is_ipv6;
long f_id = -1; /* id of the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
@@ -1781,7 +1782,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
} else {
ih = (struct iphdr *)(buf + ih_off);
ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id);
+ ih->id = htons(id++);
ih->check = csum_long(isum_seed + ih->tot_len +
ih->id) ^ 0xffff;
}
@@ -1818,7 +1819,6 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
slot++;
}
- id++;
seq += p_len;
/* The last segment may be less than gso_size. */
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 510b9c8d23a..31bcb98ef35 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1488,7 +1488,7 @@ static void
toshoboe_close (struct pci_dev *pci_dev)
{
int i;
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
IRDA_DEBUG (4, "%s()\n", __func__);
@@ -1696,7 +1696,7 @@ freeself:
static int
toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
{
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
int i = 10;
@@ -1725,7 +1725,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
static int
toshoboe_wakeup (struct pci_dev *pci_dev)
{
- struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
IRDA_DEBUG (4, "%s()\n", __func__);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 5f4758492e4..c5bd58b4d8a 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -543,7 +543,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
int crclen, len = 0;
struct sk_buff *skb;
int ret = 0;
- struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
+ struct net_device *ndev = pci_get_drvdata(r->pdev);
vlsi_irda_dev_t *idev = netdev_priv(ndev);
pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 64dfaa303dc..9bf46bd19b8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -118,8 +118,6 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
const struct ethhdr *eth, bool local)
{
struct net_device *dev = vlan->dev;
- if (!skb)
- return NET_RX_DROP;
if (local)
return vlan->forward(dev, skb);
@@ -171,9 +169,13 @@ static void macvlan_broadcast(struct sk_buff *skb,
hash = mc_hash(vlan, eth->h_dest);
if (!test_bit(hash, vlan->mc_filter))
continue;
+
+ err = NET_RX_DROP;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan, eth,
- mode == MACVLAN_MODE_BRIDGE);
+ if (likely(nskb))
+ err = macvlan_broadcast_one(
+ nskb, vlan, eth,
+ mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3a8131582e7..6312332afeb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -518,6 +518,135 @@ static const struct usb_device_id products[] = {
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+ {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x7101, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x7102, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x8000, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x8001, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9000, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9003, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9005, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x900a, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900b, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x900c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x900d, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x900f, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9010, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9010, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9011, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ {QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9032, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9033, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9034, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9035, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9036, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9037, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9038, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x903b, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x903c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x903d, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x903e, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9043, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9046, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9047, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9048, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x904c, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9050, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9052, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9053, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9053, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9054, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9054, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9055, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9056, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9062, 9)},
+ {QMI_FIXED_INTF(0x05c6, 0x9064, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9065, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9065, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9066, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9066, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9067, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 2)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9068, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9069, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9070, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9070, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9075, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9076, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9077, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9078, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 7)},
+ {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+ {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+ {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
@@ -612,7 +741,6 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index e486e416d5a..865d3f66c86 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1176,65 +1176,10 @@ int of_property_count_strings(struct device_node *np, const char *propname)
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- * the table
- *
- * Returns the device_node pointer with refcount incremented. Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name, int index)
-{
- const __be32 *phandle;
- int size;
-
- phandle = of_get_property(np, phandle_name, &size);
- if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
- return NULL;
-
- return of_find_node_by_phandle(be32_to_cpup(phandle + index));
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name: property name that specifies phandles' arguments count
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->node
- * pointer.
- *
- * Example:
- *
- * phandle1: node1 {
- * #list-cells = <2>;
- * }
- *
- * phandle2: node2 {
- * #list-cells = <1>;
- * }
- *
- * node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
- * }
- *
- * To get a device_node of the `node2' node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
static int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
- const char *cells_name, int index,
+ const char *cells_name,
+ int cell_count, int index,
struct of_phandle_args *out_args)
{
const __be32 *list, *list_end;
@@ -1262,19 +1207,32 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
if (phandle) {
/*
* Find the provider node and parse the #*-cells
- * property to determine the argument length
+ * property to determine the argument length.
+ *
+ * This is not needed if the cell count is hard-coded
+ * (i.e. cells_name not set, but cell_count is set),
+ * except when we're going to return the found node
+ * below.
*/
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_err("%s: could not find phandle\n",
- np->full_name);
- goto err;
+ if (cells_name || cur_index == index) {
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ goto err;
+ }
}
- if (of_property_read_u32(node, cells_name, &count)) {
- pr_err("%s: could not get %s for %s\n",
- np->full_name, cells_name,
- node->full_name);
- goto err;
+
+ if (cells_name) {
+ if (of_property_read_u32(node, cells_name,
+ &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ goto err;
+ }
+ } else {
+ count = cell_count;
}
/*
@@ -1334,17 +1292,117 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
return rc;
}
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Returns the device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct of_phandle_args args;
+
+ if (index < 0)
+ return NULL;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+EXPORT_SYMBOL(of_parse_phandle);
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * }
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name, int index,
struct of_phandle_args *out_args)
{
if (index < 0)
return -EINVAL;
- return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+ index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cell_count,
+ int index, struct of_phandle_args *out_args)
+{
+ if (index < 0)
+ return -EINVAL;
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
+
+/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
@@ -1362,7 +1420,8 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name)
{
- return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+ NULL);
}
EXPORT_SYMBOL(of_count_phandle_with_args);
@@ -1734,6 +1793,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
ap = dt_alloc(sizeof(*ap) + len + 1, 4);
if (!ap)
continue;
+ memset(ap, 0, sizeof(*ap) + len + 1);
ap->alias = start;
of_alias_add(ap, np, id, start, len);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4fb06f3e7b3..229dd9d69e1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,12 +11,14 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
@@ -125,13 +127,13 @@ int of_fdt_match(struct boot_param_header *blob, unsigned long node,
return score;
}
-static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+static void *unflatten_dt_alloc(void **mem, unsigned long size,
unsigned long align)
{
void *res;
- *mem = ALIGN(*mem, align);
- res = (void *)*mem;
+ *mem = PTR_ALIGN(*mem, align);
+ res = *mem;
*mem += size;
return res;
@@ -146,9 +148,9 @@ static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
* @allnextpp: pointer to ->allnext from last allocated device_node
* @fpsize: Size of the node path up at the current depth.
*/
-static unsigned long unflatten_dt_node(struct boot_param_header *blob,
- unsigned long mem,
- unsigned long *p,
+static void * unflatten_dt_node(struct boot_param_header *blob,
+ void *mem,
+ void **p,
struct device_node *dad,
struct device_node ***allnextpp,
unsigned long fpsize)
@@ -161,15 +163,15 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
int has_name = 0;
int new_format = 0;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag != OF_DT_BEGIN_NODE) {
pr_err("Weird tag at start of node: %x\n", tag);
return mem;
}
*p += 4;
- pathp = (char *)*p;
+ pathp = *p;
l = allocl = strlen(pathp) + 1;
- *p = ALIGN(*p + l, 4);
+ *p = PTR_ALIGN(*p + l, 4);
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -201,7 +203,6 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
__alignof__(struct device_node));
if (allnextpp) {
char *fn;
- memset(np, 0, sizeof(*np));
np->full_name = fn = ((char *)np) + sizeof(*np);
if (new_format) {
/* rebuild full path for new format */
@@ -239,7 +240,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
u32 sz, noff;
char *pname;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag == OF_DT_NOP) {
*p += 4;
continue;
@@ -247,11 +248,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
if (tag != OF_DT_PROP)
break;
*p += 4;
- sz = be32_to_cpup((__be32 *)(*p));
- noff = be32_to_cpup((__be32 *)((*p) + 4));
+ sz = be32_to_cpup(*p);
+ noff = be32_to_cpup(*p + 4);
*p += 8;
if (be32_to_cpu(blob->version) < 0x10)
- *p = ALIGN(*p, sz >= 8 ? 8 : 4);
+ *p = PTR_ALIGN(*p, sz >= 8 ? 8 : 4);
pname = of_fdt_get_string(blob, noff);
if (pname == NULL) {
@@ -281,11 +282,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
np->phandle = be32_to_cpup((__be32 *)*p);
pp->name = pname;
pp->length = sz;
- pp->value = (void *)*p;
+ pp->value = *p;
*prev_pp = pp;
prev_pp = &pp->next;
}
- *p = ALIGN((*p) + sz, 4);
+ *p = PTR_ALIGN((*p) + sz, 4);
}
/* with version 0x10 we may not have the name property, recreate
* it here from the unit name if absent
@@ -334,7 +335,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
else
mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
fpsize);
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
}
if (tag != OF_DT_END_NODE) {
pr_err("Weird tag at end of node: %x\n", tag);
@@ -360,7 +361,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
struct device_node **mynodes,
void * (*dt_alloc)(u64 size, u64 align))
{
- unsigned long start, mem, size;
+ unsigned long size;
+ void *start, *mem;
struct device_node **allnextp = mynodes;
pr_debug(" -> unflatten_device_tree()\n");
@@ -381,32 +383,28 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
}
/* First pass, scan for size */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
- size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
- size = (size | 3) + 1;
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
+ size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+ size = ALIGN(size, 4);
pr_debug(" size is %lx, allocating...\n", size);
/* Allocate memory for the expanded device tree */
- mem = (unsigned long)
- dt_alloc(size + 4, __alignof__(struct device_node));
+ mem = dt_alloc(size + 4, __alignof__(struct device_node));
+ memset(mem, 0, size);
- memset((void *)mem, 0, size);
+ *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
- ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
- pr_debug(" unflattening %lx...\n", mem);
+ pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
- if (be32_to_cpup((__be32 *)start) != OF_DT_END)
- pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
- if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+ if (be32_to_cpup(start) != OF_DT_END)
+ pr_warning("Weird tag at end of tree: %08x\n", be32_to_cpup(start));
+ if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpu(((__be32 *)mem)[size / 4]));
+ be32_to_cpup(mem + size));
*allnextp = NULL;
pr_debug(" <- unflatten_device_tree()\n");
@@ -628,7 +626,8 @@ int __init of_scan_flat_dt_by_path(const char *path,
*/
void __init early_init_dt_check_for_initrd(unsigned long node)
{
- unsigned long start, end, len;
+ u64 start, end;
+ unsigned long len;
__be32 *prop;
pr_debug("Looking for initrd properties... ");
@@ -636,15 +635,16 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
- start = of_read_ulong(prop, len/4);
+ start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
- end = of_read_ulong(prop, len/4);
+ end = of_read_number(prop, len/4);
early_init_dt_setup_initrd_arch(start, end);
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
+ (unsigned long long)start, (unsigned long long)end);
}
#else
inline void early_init_dt_check_for_initrd(unsigned long node)
@@ -774,6 +774,17 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * called from unflatten_device_tree() to bootstrap devicetree itself
+ * Architectures can override this definition if memblock isn't used
+ */
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+#endif
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -792,3 +803,14 @@ void __init unflatten_device_tree(void)
}
#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+/* Feed entire flattened device tree into the random pool */
+static int __init add_fdt_randomness(void)
+{
+ if (initial_boot_params)
+ add_device_randomness(initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ return 0;
+}
+core_initcall(add_fdt_randomness);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1264923ade0..1752988d6aa 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -28,7 +28,7 @@
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
- * @device: Device node of the device whose interrupt is to be mapped
+ * @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
* This function is a wrapper that chains of_irq_map_one() and
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index ea174c8ee34..8f9be2e0993 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -39,7 +39,7 @@ static const char *phy_modes[] = {
* The function gets phy interface string from property 'phy-mode',
* and return its index in phy_modes table, or errno in error case.
*/
-const int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np)
{
const char *pm;
int err, i;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index eeca8a59697..9b439ac63d8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(of_device_alloc);
* Returns pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
-struct platform_device *of_platform_device_create_pdata(
+static struct platform_device *of_platform_device_create_pdata(
struct device_node *np,
const char *bus_id,
void *platform_data,
@@ -268,8 +268,11 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
- if (!dev)
+ if (!dev) {
+ pr_err("%s(): amba_device_alloc() failed for %s\n",
+ __func__, node->full_name);
return NULL;
+ }
/* setup generic device info */
dev->dev.coherent_dma_mask = ~0;
@@ -294,12 +297,18 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev->irq[i] = irq_of_parse_and_map(node, i);
ret = of_address_to_resource(node, 0, &dev->res);
- if (ret)
+ if (ret) {
+ pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
ret = amba_device_add(dev, &iomem_resource);
- if (ret)
+ if (ret) {
+ pr_err("%s(): amba_device_add() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
return dev;
@@ -378,6 +387,10 @@ static int of_platform_bus_create(struct device_node *bus,
}
if (of_device_is_compatible(bus, "arm,primecell")) {
+ /*
+ * Don't return an error here to keep compatibility with older
+ * device tree files.
+ */
of_amba_device_create(bus, bus_id, platform_data, parent);
return 0;
}
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index f74bfcbb7ba..8eea2efbbb6 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -393,17 +393,21 @@ static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
complete(&gmux_data->powerchange_done);
}
-static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+static int gmux_suspend(struct device *dev)
{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
gmux_data->resume_client_id = gmux_active_client(gmux_data);
gmux_disable_interrupts(gmux_data);
return 0;
}
-static int gmux_resume(struct pnp_dev *pnp)
+static int gmux_resume(struct device *dev)
{
+ struct pnp_dev *pnp = to_pnp_dev(dev);
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
gmux_enable_interrupts(gmux_data);
gmux_switchto(gmux_data->resume_client_id);
if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
@@ -605,13 +609,19 @@ static const struct pnp_device_id gmux_device_ids[] = {
{"", 0}
};
+static const struct dev_pm_ops gmux_dev_pm_ops = {
+ .suspend = gmux_suspend,
+ .resume = gmux_resume,
+};
+
static struct pnp_driver gmux_pnp_driver = {
.name = "apple-gmux",
.probe = gmux_probe,
.remove = gmux_remove,
.id_table = gmux_device_ids,
- .suspend = gmux_suspend,
- .resume = gmux_resume
+ .driver = {
+ .pm = &gmux_dev_pm_ops,
+ },
};
static int __init apple_gmux_init(void)
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 12adb43a069..a39ee38a941 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -163,6 +163,13 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
if (!pnp_drv)
return 0;
+ if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) {
+ error = pnp_drv->driver.pm->suspend(dev);
+ suspend_report_result(pnp_drv->driver.pm->suspend, error);
+ if (error)
+ return error;
+ }
+
if (pnp_drv->suspend) {
error = pnp_drv->suspend(pnp_dev, state);
if (error)
@@ -211,6 +218,12 @@ static int pnp_bus_resume(struct device *dev)
return error;
}
+ if (pnp_drv->driver.pm && pnp_drv->driver.pm->resume) {
+ error = pnp_drv->driver.pm->resume(dev);
+ if (error)
+ return error;
+ }
+
if (pnp_drv->resume) {
error = pnp_drv->resume(pnp_dev);
if (error)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c63f4..bb49ab684f9 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -216,6 +216,13 @@ config BATTERY_S3C_ADC
help
Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
+config BATTERY_TWL4030_MADC
+ tristate "TWL4030 MADC battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable this dumb driver for batteries managed
+ through the TWL4030 MADC.
+
config CHARGER_88PM860X
tristate "Marvell 88PM860x Charger driver"
depends on MFD_88PM860X && BATTERY_88PM860X
@@ -334,6 +341,12 @@ config CHARGER_BQ2415X
You'll need this driver to charge batteries on e.g. Nokia
RX-51/N900.
+config CHARGER_BQ24190
+ tristate "TI BQ24190 battery charger driver"
+ depends on I2C && GPIOLIB
+ help
+ Say Y to enable support for the TI BQ24190 battery charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -357,7 +370,7 @@ config AB8500_BM
config BATTERY_GOLDFISH
tristate "Goldfish battery driver"
- depends on GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && (GOLDFISH || COMPILE_TEST)
help
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6ceff3..a4b74177706 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
+obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
+obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index f098fdafee9..a4c4a10b3a4 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -774,6 +774,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
di->max_usb_in_curr.usb_type_max);
+ break;
case USB_STAT_NOT_VALID_LINK:
dev_err(di->dev, "USB Type invalid - try charging anyway\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
new file mode 100644
index 00000000000..ad3ff8fbfbb
--- /dev/null
+++ b/drivers/power/bq24190_charger.c
@@ -0,0 +1,1549 @@
+/*
+ * Driver for the TI bq24190 battery charger.
+ *
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/power/bq24190_charger.h>
+
+
+#define BQ24190_MANUFACTURER "Texas Instruments"
+
+#define BQ24190_REG_ISC 0x00 /* Input Source Control */
+#define BQ24190_REG_ISC_EN_HIZ_MASK BIT(7)
+#define BQ24190_REG_ISC_EN_HIZ_SHIFT 7
+#define BQ24190_REG_ISC_VINDPM_MASK (BIT(6) | BIT(5) | BIT(4) | \
+ BIT(3))
+#define BQ24190_REG_ISC_VINDPM_SHIFT 3
+#define BQ24190_REG_ISC_IINLIM_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_ISC_IINLIM_SHIFT 0
+
+#define BQ24190_REG_POC 0x01 /* Power-On Configuration */
+#define BQ24190_REG_POC_RESET_MASK BIT(7)
+#define BQ24190_REG_POC_RESET_SHIFT 7
+#define BQ24190_REG_POC_WDT_RESET_MASK BIT(6)
+#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
+#define BQ24190_REG_POC_CHG_CONFIG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
+#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
+#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
+#define BQ24190_REG_POC_BOOST_LIM_MASK BIT(0)
+#define BQ24190_REG_POC_BOOST_LIM_SHIFT 0
+
+#define BQ24190_REG_CCC 0x02 /* Charge Current Control */
+#define BQ24190_REG_CCC_ICHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CCC_ICHG_SHIFT 2
+#define BQ24190_REG_CCC_FORCE_20PCT_MASK BIT(0)
+#define BQ24190_REG_CCC_FORCE_20PCT_SHIFT 0
+
+#define BQ24190_REG_PCTCC 0x03 /* Pre-charge/Termination Current Cntl */
+#define BQ24190_REG_PCTCC_IPRECHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4))
+#define BQ24190_REG_PCTCC_IPRECHG_SHIFT 4
+#define BQ24190_REG_PCTCC_ITERM_MASK (BIT(3) | BIT(2) | BIT(1) | \
+ BIT(0))
+#define BQ24190_REG_PCTCC_ITERM_SHIFT 0
+
+#define BQ24190_REG_CVC 0x04 /* Charge Voltage Control */
+#define BQ24190_REG_CVC_VREG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CVC_VREG_SHIFT 2
+#define BQ24190_REG_CVC_BATLOWV_MASK BIT(1)
+#define BQ24190_REG_CVC_BATLOWV_SHIFT 1
+#define BQ24190_REG_CVC_VRECHG_MASK BIT(0)
+#define BQ24190_REG_CVC_VRECHG_SHIFT 0
+
+#define BQ24190_REG_CTTC 0x05 /* Charge Term/Timer Control */
+#define BQ24190_REG_CTTC_EN_TERM_MASK BIT(7)
+#define BQ24190_REG_CTTC_EN_TERM_SHIFT 7
+#define BQ24190_REG_CTTC_TERM_STAT_MASK BIT(6)
+#define BQ24190_REG_CTTC_TERM_STAT_SHIFT 6
+#define BQ24190_REG_CTTC_WATCHDOG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_CTTC_WATCHDOG_SHIFT 4
+#define BQ24190_REG_CTTC_EN_TIMER_MASK BIT(3)
+#define BQ24190_REG_CTTC_EN_TIMER_SHIFT 3
+#define BQ24190_REG_CTTC_CHG_TIMER_MASK (BIT(2) | BIT(1))
+#define BQ24190_REG_CTTC_CHG_TIMER_SHIFT 1
+#define BQ24190_REG_CTTC_JEITA_ISET_MASK BIT(0)
+#define BQ24190_REG_CTTC_JEITA_ISET_SHIFT 0
+
+#define BQ24190_REG_ICTRC 0x06 /* IR Comp/Thermal Regulation Control */
+#define BQ24190_REG_ICTRC_BAT_COMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BQ24190_REG_ICTRC_BAT_COMP_SHIFT 5
+#define BQ24190_REG_ICTRC_VCLAMP_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_ICTRC_VCLAMP_SHIFT 2
+#define BQ24190_REG_ICTRC_TREG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_ICTRC_TREG_SHIFT 0
+
+#define BQ24190_REG_MOC 0x07 /* Misc. Operation Control */
+#define BQ24190_REG_MOC_DPDM_EN_MASK BIT(7)
+#define BQ24190_REG_MOC_DPDM_EN_SHIFT 7
+#define BQ24190_REG_MOC_TMR2X_EN_MASK BIT(6)
+#define BQ24190_REG_MOC_TMR2X_EN_SHIFT 6
+#define BQ24190_REG_MOC_BATFET_DISABLE_MASK BIT(5)
+#define BQ24190_REG_MOC_BATFET_DISABLE_SHIFT 5
+#define BQ24190_REG_MOC_JEITA_VSET_MASK BIT(4)
+#define BQ24190_REG_MOC_JEITA_VSET_SHIFT 4
+#define BQ24190_REG_MOC_INT_MASK_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_MOC_INT_MASK_SHIFT 0
+
+#define BQ24190_REG_SS 0x08 /* System Status */
+#define BQ24190_REG_SS_VBUS_STAT_MASK (BIT(7) | BIT(6))
+#define BQ24190_REG_SS_VBUS_STAT_SHIFT 6
+#define BQ24190_REG_SS_CHRG_STAT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_SS_CHRG_STAT_SHIFT 4
+#define BQ24190_REG_SS_DPM_STAT_MASK BIT(3)
+#define BQ24190_REG_SS_DPM_STAT_SHIFT 3
+#define BQ24190_REG_SS_PG_STAT_MASK BIT(2)
+#define BQ24190_REG_SS_PG_STAT_SHIFT 2
+#define BQ24190_REG_SS_THERM_STAT_MASK BIT(1)
+#define BQ24190_REG_SS_THERM_STAT_SHIFT 1
+#define BQ24190_REG_SS_VSYS_STAT_MASK BIT(0)
+#define BQ24190_REG_SS_VSYS_STAT_SHIFT 0
+
+#define BQ24190_REG_F 0x09 /* Fault */
+#define BQ24190_REG_F_WATCHDOG_FAULT_MASK BIT(7)
+#define BQ24190_REG_F_WATCHDOG_FAULT_SHIFT 7
+#define BQ24190_REG_F_BOOST_FAULT_MASK BIT(6)
+#define BQ24190_REG_F_BOOST_FAULT_SHIFT 6
+#define BQ24190_REG_F_CHRG_FAULT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_F_CHRG_FAULT_SHIFT 4
+#define BQ24190_REG_F_BAT_FAULT_MASK BIT(3)
+#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
+#define BQ24190_REG_F_NTC_FAULT_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
+
+#define BQ24190_REG_VPRS 0x0A /* Vendor/Part/Revision Status */
+#define BQ24190_REG_VPRS_PN_MASK (BIT(5) | BIT(4) | BIT(3))
+#define BQ24190_REG_VPRS_PN_SHIFT 3
+#define BQ24190_REG_VPRS_PN_24190 0x4
+#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193 */
+#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
+#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
+#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value. In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine. Each routine has its own
+ * flag indicating whether it should use the value stored by the last run
+ * of the interrupt handler or do an actual reg read. That way each routine
+ * can report back whatever fault may have occurred.
+ */
+struct bq24190_dev_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply charger;
+ struct power_supply battery;
+ char model_name[I2C_NAME_SIZE];
+ kernel_ulong_t model;
+ unsigned int gpio_int;
+ unsigned int irq;
+ struct mutex f_reg_lock;
+ bool first_time;
+ bool charger_health_valid;
+ bool battery_health_valid;
+ bool battery_status_valid;
+ u8 f_reg;
+ u8 ss_reg;
+ u8 watchdog;
+};
+
+/*
+ * The tables below provide a 2-way mapping between the value that goes
+ * in the register field and the real-world value that it represents.
+ * The index of the array is the value that goes in the register; the
+ * number at that index in the array is the real-world value that it
+ * represents.
+ */
+/* REG02[7:2] (ICHG) in uA */
+static const int bq24190_ccc_ichg_values[] = {
+ 512000, 576000, 640000, 704000, 768000, 832000, 896000, 960000,
+ 1024000, 1088000, 1152000, 1216000, 1280000, 1344000, 1408000, 1472000,
+ 1536000, 1600000, 1664000, 1728000, 1792000, 1856000, 1920000, 1984000,
+ 2048000, 2112000, 2176000, 2240000, 2304000, 2368000, 2432000, 2496000,
+ 2560000, 2624000, 2688000, 2752000, 2816000, 2880000, 2944000, 3008000,
+ 3072000, 3136000, 3200000, 3264000, 3328000, 3392000, 3456000, 3520000,
+ 3584000, 3648000, 3712000, 3776000, 3840000, 3904000, 3968000, 4032000,
+ 4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
+};
+
+/* REG04[7:2] (VREG) in uV */
+static const int bq24190_cvc_vreg_values[] = {
+ 3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
+ 3632000, 3648000, 3664000, 3680000, 3696000, 3712000, 3728000, 3744000,
+ 3760000, 3776000, 3792000, 3808000, 3824000, 3840000, 3856000, 3872000,
+ 3888000, 3904000, 3920000, 3936000, 3952000, 3968000, 3984000, 4000000,
+ 4016000, 4032000, 4048000, 4064000, 4080000, 4096000, 4112000, 4128000,
+ 4144000, 4160000, 4176000, 4192000, 4208000, 4224000, 4240000, 4256000,
+ 4272000, 4288000, 4304000, 4320000, 4336000, 4352000, 4368000, 4384000,
+ 4400000
+};
+
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
+static const int bq24190_ictrc_treg_values[] = {
+ 600, 800, 1000, 1200
+};
+
+/*
+ * Return the index in 'tbl' of the greatest value that is less than or
+ * equal to 'v'. The index range returned is 0 to 'tbl_size' - 1. Assumes that
+ * the values in 'tbl' are sorted from smallest to largest and 'tbl_size'
+ * is less than 2^8.
+ */
+static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v)
+{
+ int i;
+
+ for (i = 1; i < tbl_size; i++)
+ if (v < tbl[i])
+ break;
+
+ return i - 1;
+}
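+/*
+ * For example, with bq24190_ccc_ichg_values[] and v == 600000, the loop
+ * breaks at i == 2 (640000) and the function returns 1, i.e. the
+ * 576000 uA entry, the largest value not above v.
+ */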
+
+/* Basic driver I/O routines */
+
+static int bq24190_read(struct bq24190_dev_info *bdi, u8 reg, u8 *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(bdi->client, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret;
+ return 0;
+}
+
+static int bq24190_write(struct bq24190_dev_info *bdi, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(bdi->client, reg, data);
+}
+
+static int bq24190_read_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 *data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= mask;
+ v >>= shift;
+ *data = v;
+
+ return 0;
+}
+
+static int bq24190_write_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= ~mask;
+ v |= ((data << shift) & mask);
+
+ return bq24190_write(bdi, reg, v);
+}
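+/*
+ * Example: bq24190_write_mask(bdi, BQ24190_REG_CTTC,
+ * BQ24190_REG_CTTC_WATCHDOG_MASK, BQ24190_REG_CTTC_WATCHDOG_SHIFT, 0)
+ * clears REG05[5:4] while leaving the other CTTC bits untouched.
+ */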
+
+static int bq24190_get_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, reg, mask, shift, &v);
+ if (ret < 0)
+ return ret;
+
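+	/* Clamp out-of-range field values to the last table entry */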
+ v = (v >= tbl_size) ? (tbl_size - 1) : v;
+ *val = tbl[v];
+
+ return 0;
+}
+
+static int bq24190_set_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int val)
+{
+ u8 idx;
+
+ idx = bq24190_find_idx(tbl, tbl_size, val);
+
+ return bq24190_write_mask(bdi, reg, mask, shift, idx);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * There are numerous options configurable on the bq24190 that go well
+ * beyond what the power_supply properties provide access to.
+ * Provide sysfs access to them so they can be examined and possibly modified
+ * on the fly. They will be provided for the charger power_supply object only
+ * and will be prefixed by 'f_' to make them easier to recognize.
+ */
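+/*
+ * For example (assuming sysfs is mounted in the usual place), the ICHG
+ * field can be examined and changed with:
+ *
+ *   cat /sys/class/power_supply/bq24190-charger/f_ichg
+ *   echo 0x28 > /sys/class/power_supply/bq24190-charger/f_ichg
+ *
+ * Reads print the raw field value in hex; writes accept any base
+ * that kstrtou8() understands.
+ */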
+
+#define BQ24190_SYSFS_FIELD(_name, r, f, m, store) \
+{ \
+ .attr = __ATTR(f_##_name, m, bq24190_sysfs_show, store), \
+ .reg = BQ24190_REG_##r, \
+ .mask = BQ24190_REG_##r##_##f##_MASK, \
+ .shift = BQ24190_REG_##r##_##f##_SHIFT, \
+}
+
+#define BQ24190_SYSFS_FIELD_RW(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IWUSR | S_IRUGO, \
+ bq24190_sysfs_store)
+
+#define BQ24190_SYSFS_FIELD_RO(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IRUGO, NULL)
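+/*
+ * For example, BQ24190_SYSFS_FIELD_RW(ichg, CCC, ICHG) creates a
+ * writable 'f_ichg' attribute backed by the ICHG field (mask and
+ * shift) of the CCC register.
+ */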
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+struct bq24190_sysfs_field_info {
+ struct device_attribute attr;
+ u8 reg;
+ u8 mask;
+ u8 shift;
+};
+
+/* On i386, ptrace-abi.h defines SS, which breaks the macro calls below. */
+#undef SS
+
+static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
+ /* sysfs name reg field in reg */
+ BQ24190_SYSFS_FIELD_RW(en_hiz, ISC, EN_HIZ),
+ BQ24190_SYSFS_FIELD_RW(vindpm, ISC, VINDPM),
+ BQ24190_SYSFS_FIELD_RW(iinlim, ISC, IINLIM),
+ BQ24190_SYSFS_FIELD_RW(chg_config, POC, CHG_CONFIG),
+ BQ24190_SYSFS_FIELD_RW(sys_min, POC, SYS_MIN),
+ BQ24190_SYSFS_FIELD_RW(boost_lim, POC, BOOST_LIM),
+ BQ24190_SYSFS_FIELD_RW(ichg, CCC, ICHG),
+ BQ24190_SYSFS_FIELD_RW(force_20_pct, CCC, FORCE_20PCT),
+ BQ24190_SYSFS_FIELD_RW(iprechg, PCTCC, IPRECHG),
+ BQ24190_SYSFS_FIELD_RW(iterm, PCTCC, ITERM),
+ BQ24190_SYSFS_FIELD_RW(vreg, CVC, VREG),
+ BQ24190_SYSFS_FIELD_RW(batlowv, CVC, BATLOWV),
+ BQ24190_SYSFS_FIELD_RW(vrechg, CVC, VRECHG),
+ BQ24190_SYSFS_FIELD_RW(en_term, CTTC, EN_TERM),
+ BQ24190_SYSFS_FIELD_RW(term_stat, CTTC, TERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog, CTTC, WATCHDOG),
+ BQ24190_SYSFS_FIELD_RW(en_timer, CTTC, EN_TIMER),
+ BQ24190_SYSFS_FIELD_RW(chg_timer, CTTC, CHG_TIMER),
+	BQ24190_SYSFS_FIELD_RW(jeita_iset,	CTTC,	JEITA_ISET),
+ BQ24190_SYSFS_FIELD_RW(bat_comp, ICTRC, BAT_COMP),
+ BQ24190_SYSFS_FIELD_RW(vclamp, ICTRC, VCLAMP),
+ BQ24190_SYSFS_FIELD_RW(treg, ICTRC, TREG),
+ BQ24190_SYSFS_FIELD_RW(dpdm_en, MOC, DPDM_EN),
+ BQ24190_SYSFS_FIELD_RW(tmr2x_en, MOC, TMR2X_EN),
+ BQ24190_SYSFS_FIELD_RW(batfet_disable, MOC, BATFET_DISABLE),
+ BQ24190_SYSFS_FIELD_RW(jeita_vset, MOC, JEITA_VSET),
+ BQ24190_SYSFS_FIELD_RO(int_mask, MOC, INT_MASK),
+ BQ24190_SYSFS_FIELD_RO(vbus_stat, SS, VBUS_STAT),
+ BQ24190_SYSFS_FIELD_RO(chrg_stat, SS, CHRG_STAT),
+ BQ24190_SYSFS_FIELD_RO(dpm_stat, SS, DPM_STAT),
+ BQ24190_SYSFS_FIELD_RO(pg_stat, SS, PG_STAT),
+ BQ24190_SYSFS_FIELD_RO(therm_stat, SS, THERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(vsys_stat, SS, VSYS_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog_fault, F, WATCHDOG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(boost_fault, F, BOOST_FAULT),
+ BQ24190_SYSFS_FIELD_RO(chrg_fault, F, CHRG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(bat_fault, F, BAT_FAULT),
+ BQ24190_SYSFS_FIELD_RO(ntc_fault, F, NTC_FAULT),
+ BQ24190_SYSFS_FIELD_RO(pn, VPRS, PN),
+ BQ24190_SYSFS_FIELD_RO(ts_profile, VPRS, TS_PROFILE),
+ BQ24190_SYSFS_FIELD_RO(dev_reg, VPRS, DEV_REG),
+};
+
+static struct attribute *
+ bq24190_sysfs_attrs[ARRAY_SIZE(bq24190_sysfs_field_tbl) + 1];
+
+static const struct attribute_group bq24190_sysfs_attr_group = {
+ .attrs = bq24190_sysfs_attrs,
+};
+
+static void bq24190_sysfs_init_attrs(void)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ bq24190_sysfs_attrs[i] = &bq24190_sysfs_field_tbl[i].attr.attr;
+
+	bq24190_sysfs_attrs[limit] = NULL; /* extra slot reserved for this NULL terminator */
+}
+
+static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
+ const char *name)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ if (!strcmp(name, bq24190_sysfs_field_tbl[i].attr.attr.name))
+ break;
+
+ if (i >= limit)
+ return NULL;
+
+ return &bq24190_sysfs_field_tbl[i];
+}
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%hhx\n", v);
+}
+
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = kstrtou8(buf, 0, &v);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ bq24190_sysfs_init_attrs();
+
+ return sysfs_create_group(&bdi->charger.dev->kobj,
+ &bq24190_sysfs_attr_group);
+}
+
+static void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi)
+{
+ sysfs_remove_group(&bdi->charger.dev->kobj, &bq24190_sysfs_attr_group);
+}
+#else
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ return 0;
+}
+
+static inline void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi) {}
+#endif
+
+/*
+ * According to the "Host Mode and default Mode" section of the
+ * manual, a write to any register causes the bq24190 to switch
+ * from default mode to host mode. It will switch back to default
+ * mode after a WDT timeout unless the WDT is turned off as well.
+ * So, by simply turning off the WDT, we accomplish both with the
+ * same write.
+ */
+static int bq24190_set_mode_host(struct bq24190_dev_info *bdi)
+{
+ int ret;
+ u8 v;
+
+ ret = bq24190_read(bdi, BQ24190_REG_CTTC, &v);
+ if (ret < 0)
+ return ret;
+
+ bdi->watchdog = ((v & BQ24190_REG_CTTC_WATCHDOG_MASK) >>
+ BQ24190_REG_CTTC_WATCHDOG_SHIFT);
+ v &= ~BQ24190_REG_CTTC_WATCHDOG_MASK;
+
+ return bq24190_write(bdi, BQ24190_REG_CTTC, v);
+}
+
+static int bq24190_register_reset(struct bq24190_dev_info *bdi)
+{
+ int ret, limit = 100;
+ u8 v;
+
+ /* Reset the registers */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ 0x1);
+ if (ret < 0)
+ return ret;
+
+ /* Reset bit will be cleared by hardware so poll until it is */
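+	/* (at most 100 iterations of 10 us each, i.e. ~1 ms) */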
+ do {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ if (!v)
+ break;
+
+ udelay(10);
+ } while (--limit);
+
+ if (!limit)
+ return -EIO;
+
+ return 0;
+}
+
+/* Charger power supply property routines */
+
+static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int type, ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ /* If POC[CHG_CONFIG] (REG01[5:4]) == 0, charge is disabled */
+ if (!v) {
+ type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ } else {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ type = (v) ? POWER_SUPPLY_CHARGE_TYPE_TRICKLE :
+ POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 chg_config, force_20pct, en_term;
+ int ret;
+
+ /*
+ * According to the "Termination when REG02[0] = 1" section of
+ * the bq24190 manual, the trickle charge could be less than the
+ * termination current so it recommends turning off the termination
+ * function.
+ *
+ * Note: AFAICT from the datasheet, the user will have to manually
+	 * turn off the charging when in 20% mode. If it's not turned off,
+ * there could be battery damage. So, use this mode at your own risk.
+ */
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_NONE:
+ chg_config = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
+ chg_config = 0x1;
+ force_20pct = 0x1;
+ en_term = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ chg_config = 0x1;
+ force_20pct = 0x0;
+ en_term = 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chg_config) { /* Enabling the charger */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ force_20pct);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CTTC,
+ BQ24190_REG_CTTC_EN_TERM_MASK,
+ BQ24190_REG_CTTC_EN_TERM_SHIFT,
+ en_term);
+ if (ret < 0)
+ return ret;
+ }
+
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+}
+
+static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->charger_health_valid) {
+ v = bdi->f_reg;
+ bdi->charger_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
+ /*
+ * This could be over-current or over-voltage but there's
+ * no way to tell which. Return 'OVERVOLTAGE' since there
+ * isn't an 'OVERCURRENT' value defined that we can return
+ * even if it was over-current.
+ */
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ v >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* Input Fault (VBUS OVP or VBAT<VBUS<3.8V) */
+ /*
+ * This could be over-voltage or under-voltage
+ * and there's no way to tell which. Instead
+ * of looking foolish and returning 'OVERVOLTAGE'
+			 * when it's really under-voltage, just return
+ * 'UNSPEC_FAILURE'.
+ */
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ case 0x2: /* Thermal Shutdown */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case 0x3: /* Charge Safety Timer Expiration */
+ health = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int bq24190_charger_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_SS,
+ BQ24190_REG_SS_PG_STAT_MASK,
+ BQ24190_REG_SS_PG_STAT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ val->intval = v;
+ return 0;
+}
+
+static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int curr, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, then current is 20% of ICHG value */
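+	/* e.g. a 2048000 uA ICHG setting is reported as 409600 uA */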
+ if (v)
+ curr /= 5;
+
+ val->intval = curr;
+ return 0;
+}
+
+static int bq24190_charger_get_current_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+
+ val->intval = bq24190_ccc_ichg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 v;
+ int ret, curr = val->intval;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, have to multiply value passed in by 5 */
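+	/* e.g. a request for 409600 uA stores the 2048000 uA ICHG code */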
+ if (v)
+ curr *= 5;
+
+ return bq24190_set_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+}
+
+static int bq24190_charger_get_voltage(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int voltage, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), &voltage);
+ if (ret < 0)
+ return ret;
+
+ val->intval = voltage;
+ return 0;
+}
+
+static int bq24190_charger_get_voltage_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
+
+ val->intval = bq24190_cvc_vreg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_voltage(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), val->intval);
+}
+
+static int bq24190_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_get_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_charger_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_charger_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_get_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = bq24190_charger_get_current_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_get_voltage(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ ret = bq24190_charger_get_voltage_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bdi->model_name;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ24190_MANUFACTURER;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_set_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_set_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_set_voltage(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_charger_properties[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static char *bq24190_charger_supplied_to[] = {
+ "main-battery",
+};
+
+static void bq24190_charger_init(struct power_supply *charger)
+{
+ charger->name = "bq24190-charger";
+ charger->type = POWER_SUPPLY_TYPE_USB;
+ charger->properties = bq24190_charger_properties;
+ charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ charger->supplied_to = bq24190_charger_supplied_to;
+ charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->get_property = bq24190_charger_get_property;
+ charger->set_property = bq24190_charger_set_property;
+ charger->property_is_writeable = bq24190_charger_property_is_writeable;
+}
+
+/* Battery power supply property routines */
+
+static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 ss_reg, chrg_fault;
+ int status, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_status_valid) {
+ chrg_fault = bdi->f_reg;
+ bdi->battery_status_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
+ if (ret < 0)
+ return ret;
+ }
+
+ chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The battery must be discharging when any of these are true:
+ * - there is no good power source;
+ * - there is a charge fault.
+ * Could also be discharging when in "supplement mode" but
+	 * there is no way to tell when it's in that mode.
+ */
+ if (!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK) || chrg_fault) {
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ ss_reg &= BQ24190_REG_SS_CHRG_STAT_MASK;
+ ss_reg >>= BQ24190_REG_SS_CHRG_STAT_SHIFT;
+
+ switch (ss_reg) {
+ case 0x0: /* Not Charging */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 0x1: /* Pre-charge */
+ case 0x2: /* Fast Charging */
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x3: /* Charge Termination Done */
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ ret = -EIO;
+ }
+ }
+
+ if (!ret)
+ val->intval = status;
+
+ return ret;
+}
+
+static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_health_valid) {
+ v = bdi->f_reg;
+ bdi->battery_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_NTC_FAULT_MASK;
+ v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* TS1 Cold */
+ case 0x3: /* TS2 Cold */
+ case 0x5: /* Both Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case 0x2: /* TS1 Hot */
+ case 0x4: /* TS2 Hot */
+ case 0x6: /* Both Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+ return 0;
+}
+
+static int bq24190_battery_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 batfet_disable;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, &batfet_disable);
+ if (ret < 0)
+ return ret;
+
+ val->intval = !batfet_disable;
+ return 0;
+}
+
+static int bq24190_battery_set_online(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, !val->intval);
+}
+
+static int bq24190_battery_get_temp_alert_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int temp, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), &temp);
+ if (ret < 0)
+ return ret;
+
+ val->intval = temp;
+ return 0;
+}
+
+static int bq24190_battery_set_temp_alert_max(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), val->intval);
+}
+
+static int bq24190_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq24190_battery_get_status(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_battery_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+		/* Could be Li-ion or Li-polymer but no way to tell which */
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_get_temp_alert_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+	pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_set_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_set_temp_alert_max(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static void bq24190_battery_init(struct power_supply *battery)
+{
+ battery->name = "bq24190-battery";
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = bq24190_battery_properties;
+ battery->num_properties = ARRAY_SIZE(bq24190_battery_properties);
+ battery->get_property = bq24190_battery_get_property;
+ battery->set_property = bq24190_battery_set_property;
+ battery->property_is_writeable = bq24190_battery_property_is_writeable;
+}
+
+static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
+{
+ struct bq24190_dev_info *bdi = data;
+ bool alert_userspace = false;
+	u8 ss_reg = 0, f_reg = 0;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
+ goto out;
+ }
+
+ if (ss_reg != bdi->ss_reg) {
+ /*
+ * The device is in host mode so when PG_STAT goes from 1->0
+ * (i.e., power removed) HIZ needs to be disabled.
+ */
+ if ((bdi->ss_reg & BQ24190_REG_SS_PG_STAT_MASK) &&
+ !(ss_reg & BQ24190_REG_SS_PG_STAT_MASK)) {
+ ret = bq24190_write_mask(bdi, BQ24190_REG_ISC,
+ BQ24190_REG_ISC_EN_HIZ_MASK,
+ BQ24190_REG_ISC_EN_HIZ_SHIFT,
+ 0);
+ if (ret < 0)
+ dev_err(bdi->dev, "Can't access ISC reg: %d\n",
+ ret);
+ }
+
+ bdi->ss_reg = ss_reg;
+ alert_userspace = true;
+ }
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ mutex_unlock(&bdi->f_reg_lock);
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+
+ if (f_reg != bdi->f_reg) {
+ bdi->f_reg = f_reg;
+ bdi->charger_health_valid = true;
+ bdi->battery_health_valid = true;
+ bdi->battery_status_valid = true;
+
+ alert_userspace = true;
+ }
+
+ mutex_unlock(&bdi->f_reg_lock);
+
+ /*
+ * Sometimes bq24190 gives a steady trickle of interrupts even
+ * though the watchdog timer is turned off and neither the STATUS
+	 * nor FAULT registers have changed. Weed out these spurious
+ * interrupts so userspace isn't alerted for no reason.
+ * In addition, the chip always generates an interrupt after
+ * register reset so we should ignore that one (the very first
+ * interrupt received).
+ */
+	if (alert_userspace) {
+		if (!bdi->first_time) {
+			power_supply_changed(&bdi->charger);
+			power_supply_changed(&bdi->battery);
+		} else {
+			bdi->first_time = false;
+		}
+	}
+
+out:
+ pm_runtime_put_sync(bdi->dev);
+
+ dev_dbg(bdi->dev, "ss_reg: 0x%02x, f_reg: 0x%02x\n", ss_reg, f_reg);
+
+ return IRQ_HANDLED;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+ u8 v;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+	/* First check that the device really is what it's supposed to be */
+ ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+ BQ24190_REG_VPRS_PN_MASK,
+ BQ24190_REG_VPRS_PN_SHIFT,
+ &v);
+ if (ret < 0)
+ goto out;
+
+ if (v != bdi->model) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = bq24190_register_reset(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_set_mode_host(bdi);
+out:
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ bdi->irq = irq_of_parse_and_map(bdi->dev->of_node, 0);
+ if (bdi->irq <= 0)
+ return -1;
+
+ return 0;
+}
+#else
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ return -1;
+}
+#endif
+
+static int bq24190_setup_pdata(struct bq24190_dev_info *bdi,
+ struct bq24190_platform_data *pdata)
+{
+ int ret;
+
+ if (!gpio_is_valid(pdata->gpio_int))
+ return -1;
+
+ ret = gpio_request(pdata->gpio_int, dev_name(bdi->dev));
+ if (ret < 0)
+ return -1;
+
+ ret = gpio_direction_input(pdata->gpio_int);
+ if (ret < 0)
+ goto out;
+
+ bdi->irq = gpio_to_irq(pdata->gpio_int);
+ if (!bdi->irq)
+ goto out;
+
+ bdi->gpio_int = pdata->gpio_int;
+ return 0;
+
+out:
+ gpio_free(pdata->gpio_int);
+ return -1;
+}
+
+static int bq24190_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct device *dev = &client->dev;
+ struct bq24190_platform_data *pdata = client->dev.platform_data;
+ struct bq24190_dev_info *bdi;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+ return -ENODEV;
+ }
+
+ bdi = devm_kzalloc(dev, sizeof(*bdi), GFP_KERNEL);
+ if (!bdi) {
+ dev_err(dev, "Can't alloc bdi struct\n");
+ return -ENOMEM;
+ }
+
+ bdi->client = client;
+ bdi->dev = dev;
+ bdi->model = id->driver_data;
+ strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
+ mutex_init(&bdi->f_reg_lock);
+ bdi->first_time = true;
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ i2c_set_clientdata(client, bdi);
+
+ if (dev->of_node)
+ ret = bq24190_setup_dt(bdi);
+ else
+ ret = bq24190_setup_pdata(bdi, pdata);
+
+ if (ret) {
+ dev_err(dev, "Can't get irq info\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
+ goto out1;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
+ ret = bq24190_hw_init(bdi);
+ if (ret < 0) {
+ dev_err(dev, "Hardware init failed\n");
+ goto out2;
+ }
+
+ bq24190_charger_init(&bdi->charger);
+
+ ret = power_supply_register(dev, &bdi->charger);
+ if (ret) {
+ dev_err(dev, "Can't register charger\n");
+ goto out2;
+ }
+
+ bq24190_battery_init(&bdi->battery);
+
+ ret = power_supply_register(dev, &bdi->battery);
+ if (ret) {
+ dev_err(dev, "Can't register battery\n");
+ goto out3;
+ }
+
+ ret = bq24190_sysfs_create_group(bdi);
+ if (ret) {
+ dev_err(dev, "Can't create sysfs entries\n");
+ goto out4;
+ }
+
+ return 0;
+
+out4:
+ power_supply_unregister(&bdi->battery);
+out3:
+ power_supply_unregister(&bdi->charger);
+out2:
+ pm_runtime_disable(dev);
+out1:
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return ret;
+}
+
+static int bq24190_remove(struct i2c_client *client)
+{
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ bq24190_sysfs_remove_group(bdi);
+ power_supply_unregister(&bdi->battery);
+ power_supply_unregister(&bdi->charger);
+ pm_runtime_disable(bdi->dev);
+
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bq24190_pm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ return 0;
+}
+
+static int bq24190_pm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ /* Things may have changed while suspended so alert upper layer */
+ power_supply_changed(&bdi->charger);
+ power_supply_changed(&bdi->battery);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bq24190_pm_ops, bq24190_pm_suspend, bq24190_pm_resume);
+
+/*
+ * Only support the bq24190 right now. The bq24192, bq24192i, and bq24193
+ * are similar but not identical so the driver needs to be extended to
+ * support them.
+ */
+static const struct i2c_device_id bq24190_i2c_ids[] = {
+ { "bq24190", BQ24190_REG_VPRS_PN_24190 },
+ { },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id bq24190_of_match[] = {
+ { .compatible = "ti,bq24190", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bq24190_of_match);
+#else
+static const struct of_device_id bq24190_of_match[] = {
+ { },
+};
+#endif
+
+static struct i2c_driver bq24190_driver = {
+ .probe = bq24190_probe,
+ .remove = bq24190_remove,
+ .id_table = bq24190_i2c_ids,
+ .driver = {
+ .name = "bq24190-charger",
+ .owner = THIS_MODULE,
+ .pm = &bq24190_pm_ops,
+ .of_match_table = of_match_ptr(bq24190_of_match),
+ },
+};
+module_i2c_driver(bq24190_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_ALIAS("i2c:bq24190-charger");
+MODULE_DESCRIPTION("TI BQ24190 Charger Driver");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index c58d0e31bde..d02ae02a759 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -287,7 +287,7 @@ static struct gpio collie_batt_gpios[] = {
};
#ifdef CONFIG_PM
-static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
+static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
/* flush all pending status updates */
flush_work(&bat_work);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 0ee1e14f76e..b4513f284bb 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -458,6 +458,7 @@ max8925_power_dt_init(struct platform_device *pdev)
of_property_read_u32(np, "fast-charge", &fast_charge);
of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
of_property_read_u32(np, "no-temp-support", &no_temp_support);
+ of_node_put(np);
pdata->batt_detect = batt_detect;
pdata->fast_charge = fast_charge;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 3b2d5df45e7..00e66729636 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -67,23 +67,42 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
-
- power_supply_update_leds(psy);
-
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+ power_supply_update_leds(psy);
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ /*
+ * Dependent power supplies (e.g. battery) may have changed state
+ * as a result of this event, so poll again and hold the
+ * wakeup_source until all events are processed.
+ */
+ if (!psy->changed)
+ pm_relax(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ pm_stay_awake(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -500,6 +519,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto check_supplies_failed;
}
+ spin_lock_init(&psy->changed_lock);
+ rc = device_init_wakeup(dev, true);
+ if (rc)
+ goto wakeup_init_failed;
+
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
@@ -529,6 +553,7 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
+wakeup_init_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
@@ -546,6 +571,7 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
+ device_init_wakeup(psy->dev, false);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f78d73..44420d1e909 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -118,7 +118,7 @@ static ssize_t power_supply_store_property(struct device *dev,
long long_val;
/* TODO: support other types than int */
- ret = strict_strtol(buf, 10, &long_val);
+ ret = kstrtol(buf, 10, &long_val);
if (ret < 0)
return ret;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ee039dcead0..9b3ea535b47 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -14,6 +14,12 @@ config POWER_RESET_GPIO
If your board needs a GPIO high/low to power down, say Y and
create a binding in your devicetree.
+config POWER_RESET_MSM
+ bool "Qualcomm MSM power-off driver"
+ depends on POWER_RESET && ARCH_MSM
+ help
+ Power off and restart support for Qualcomm boards.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && POWER_RESET && PLAT_ORION
@@ -34,7 +40,14 @@ config POWER_RESET_RESTART
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on POWER_RESET
+ depends on POWER_RESET && VEXPRESS_CONFIG
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
+
+config POWER_RESET_XGENE
+ bool "APM SoC X-Gene reset driver"
+ depends on ARM64
+ depends on POWER_RESET
+ help
+ Reboot support for the APM SoC X-Gene Eval boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 372807fd83f..3e6ed88725a 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
new file mode 100644
index 00000000000..774f9a3b310
--- /dev/null
+++ b/drivers/power/reset/msm-poweroff.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+static void __iomem *msm_ps_hold;
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
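+	/* Deasserting PS_HOLD should reset the board before the delay expires */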
+ writel(0, msm_ps_hold);
+ mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+ /* TODO: Add poweroff capability */
+ do_msm_restart(REBOOT_HARD, NULL);
+}
+
+static int msm_restart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msm_ps_hold = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(msm_ps_hold))
+ return PTR_ERR(msm_ps_hold);
+
+ pm_power_off = do_msm_poweroff;
+ arm_pm_restart = do_msm_restart;
+ return 0;
+}
+
+static const struct of_device_id of_msm_restart_match[] = {
+ { .compatible = "qcom,pshold", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_msm_restart_match);
+
+static struct platform_driver msm_restart_driver = {
+ .probe = msm_restart_probe,
+ .driver = {
+ .name = "msm-restart",
+ .of_match_table = of_match_ptr(of_msm_restart_match),
+ },
+};
+
+static int __init msm_restart_init(void)
+{
+ return platform_driver_register(&msm_restart_driver);
+}
+device_initcall(msm_restart_init);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
new file mode 100644
index 00000000000..ecd55f81b9d
--- /dev/null
+++ b/drivers/power/reset/xgene-reboot.c
@@ -0,0 +1,103 @@
+/*
+ * AppliedMicro X-Gene SoC Reboot Driver
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver provides system reboot functionality for APM X-Gene SoC.
+ * For system shutdown, this is board specific. If a board designer
+ * implements GPIO shutdown, use the gpio-poweroff.c driver.
+ */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <asm/system_misc.h>
+
+struct xgene_reboot_context {
+ struct platform_device *pdev;
+ void *csr;
+ u32 mask;
+};
+
+static struct xgene_reboot_context *xgene_restart_ctx;
+
+static void xgene_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ struct xgene_reboot_context *ctx = xgene_restart_ctx;
+ unsigned long timeout;
+
+ /* Issue the reboot */
+ if (ctx)
+ writel(ctx->mask, ctx->csr);
+
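+	/* Give the reset up to one second (HZ jiffies) to take effect */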
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout))
+ cpu_relax();
+
+ dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+}
+
+static int xgene_reboot_probe(struct platform_device *pdev)
+{
+ struct xgene_reboot_context *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "out of memory for context\n");
+ return -ENODEV;
+ }
+
+ ctx->csr = of_iomap(pdev->dev.of_node, 0);
+ if (!ctx->csr) {
+ devm_kfree(&pdev->dev, ctx);
+		dev_err(&pdev->dev, "cannot map resource\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ ctx->mask = 0xFFFFFFFF;
+
+ ctx->pdev = pdev;
+ arm_pm_restart = xgene_restart;
+ xgene_restart_ctx = ctx;
+
+ return 0;
+}
+
+static struct of_device_id xgene_reboot_of_match[] = {
+ { .compatible = "apm,xgene-reboot" },
+ {}
+};
+
+static struct platform_driver xgene_reboot_driver = {
+ .probe = xgene_reboot_probe,
+ .driver = {
+ .name = "xgene-reboot",
+ .of_match_table = xgene_reboot_of_match,
+ },
+};
+
+static int __init xgene_reboot_init(void)
+{
+ return platform_driver_register(&xgene_reboot_driver);
+}
+device_initcall(xgene_reboot_init);
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 8a6288d8705..1bc5857b8bd 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/i2c/twl4030-madc.h>
+/* RX51 specific channels */
+#define TWL4030_MADC_BTEMP_RX51 TWL4030_MADC_ADCIN0
+#define TWL4030_MADC_BCI_RX51 TWL4030_MADC_ADCIN4
+
struct rx51_device_info {
struct device *dev;
struct power_supply bat;
@@ -37,7 +41,7 @@ static int rx51_battery_read_adc(int channel)
{
struct twl4030_madc_request req;
- req.channels = 1 << channel;
+ req.channels = channel;
req.do_avg = 1;
req.method = TWL4030_MADC_SW1;
req.func_cb = NULL;
@@ -47,7 +51,7 @@ static int rx51_battery_read_adc(int channel)
if (twl4030_madc_conversion(&req) <= 0)
return -ENODATA;
- return req.rbuf[channel];
+ return req.rbuf[ffs(channel) - 1];
}
/*
@@ -56,7 +60,7 @@ static int rx51_battery_read_adc(int channel)
*/
static int rx51_battery_read_voltage(struct rx51_device_info *di)
{
- int voltage = rx51_battery_read_adc(12);
+ int voltage = rx51_battery_read_adc(TWL4030_MADC_VBAT);
if (voltage < 0)
return voltage;
@@ -108,7 +112,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
{
int min = 0;
int max = ARRAY_SIZE(rx51_temp_table2) - 1;
- int raw = rx51_battery_read_adc(0);
+ int raw = rx51_battery_read_adc(TWL4030_MADC_BTEMP_RX51);
/* Zero and negative values are undefined */
if (raw <= 0)
@@ -142,7 +146,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
*/
static int rx51_battery_read_capacity(struct rx51_device_info *di)
{
- int capacity = rx51_battery_read_adc(4);
+ int capacity = rx51_battery_read_adc(TWL4030_MADC_BCI_RX51);
if (capacity < 0)
return capacity;
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 0224de50c54..f4d80df627c 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -150,7 +150,7 @@ static void tosa_bat_external_power_changed(struct power_supply *psy)
static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
{
- pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+ pr_info("tosa_bat_gpio irq\n");
schedule_work(&bat_work);
return IRQ_HANDLED;
}
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index be98e70380f..d98abe911e3 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -189,7 +189,12 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
/* Need to keep regulator on */
if (!bci->usb_enabled) {
- regulator_enable(bci->usb_reg);
+ ret = regulator_enable(bci->usb_reg);
+ if (ret) {
+ dev_err(bci->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
bci->usb_enabled = 1;
}
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
new file mode 100644
index 00000000000..7ef445a6cfa
--- /dev/null
+++ b/drivers/power/twl4030_madc_battery.c
@@ -0,0 +1,245 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Lukas Märdian <lukas@goldelico.com>
+ *
+ * Based on dumb driver for gta01 battery
+ * Copyright 2009 Openmoko, Inc
+ * Balaji Rao <balajirrao@openmoko.org>
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/power/twl4030_madc_battery.h>
+
+struct twl4030_madc_battery {
+ struct power_supply psy;
+ struct twl4030_madc_bat_platform_data *pdata;
+};
+
+static enum power_supply_property twl4030_madc_bat_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+};
+
+static int madc_read(int index)
+{
+ struct twl4030_madc_request req;
+ int val;
+
+ req.channels = index;
+ req.method = TWL4030_MADC_SW2;
+ req.type = TWL4030_MADC_WAIT;
+ req.do_avg = 0;
+ req.raw = false;
+ req.func_cb = NULL;
+
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
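+	/* 'index' is a one-bit channel mask; ffs() recovers the channel number */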
+ return req.rbuf[ffs(index) - 1];
+}
+
+static int twl4030_madc_bat_get_charging_status(void)
+{
+ return (madc_read(TWL4030_MADC_ICHG) > 0) ? 1 : 0;
+}
+
+static int twl4030_madc_bat_get_voltage(void)
+{
+ return madc_read(TWL4030_MADC_VBAT);
+}
+
+static int twl4030_madc_bat_get_current(void)
+{
+ return madc_read(TWL4030_MADC_ICHG) * 1000;
+}
+
+static int twl4030_madc_bat_get_temp(void)
+{
+ return madc_read(TWL4030_MADC_BTEMP) * 10;
+}
+
+static int twl4030_madc_bat_voltscale(struct twl4030_madc_battery *bat,
+ int volt)
+{
+ struct twl4030_madc_bat_calibration *calibration;
+ int i, res = 0;
+
+ /* choose charging curve */
+ if (twl4030_madc_bat_get_charging_status())
+ calibration = bat->pdata->charging;
+ else
+ calibration = bat->pdata->discharging;
+
+ if (volt > calibration[0].voltage) {
+ res = calibration[0].level;
+ } else {
+ for (i = 0; calibration[i+1].voltage >= 0; i++) {
+ if (volt <= calibration[i].voltage &&
+ volt >= calibration[i+1].voltage) {
+ /* interval found - interpolate within range */
+ res = calibration[i].level -
+ ((calibration[i].voltage - volt) *
+ (calibration[i].level -
+ calibration[i+1].level)) /
+ (calibration[i].voltage -
+ calibration[i+1].voltage);
+ break;
+ }
+ }
+ }
+ return res;
+}
+
+static int twl4030_madc_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage()) > 95)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else {
+ if (twl4030_madc_bat_get_charging_status())
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = twl4030_madc_bat_get_voltage() * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = twl4030_madc_bat_get_current();
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* assume battery is always present */
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ val->intval = (percent * bat->pdata->capacity) / 100;
+ break;
+ }
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = bat->pdata->capacity;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = twl4030_madc_bat_get_temp();
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ /* in mAh */
+ int chg = (percent * (bat->pdata->capacity/1000))/100;
+
+ /* assume discharge with 400 mA (ca. 1.5W) */
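+		/* time to empty [s] = chg [mAh] * 3600 [s/h] / 400 [mA] */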
+ val->intval = (3600l * chg) / 400;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ power_supply_changed(&bat->psy);
+}
+
+static int twl4030_cmp(const void *a, const void *b)
+{
+ return ((struct twl4030_madc_bat_calibration *)b)->voltage -
+ ((struct twl4030_madc_bat_calibration *)a)->voltage;
+}
+
+static int twl4030_madc_battery_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+ return -ENOMEM;
+
+ twl4030_madc_bat->psy.name = "twl4030_battery";
+ twl4030_madc_bat->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ twl4030_madc_bat->psy.properties = twl4030_madc_bat_props;
+ twl4030_madc_bat->psy.num_properties =
+ ARRAY_SIZE(twl4030_madc_bat_props);
+ twl4030_madc_bat->psy.get_property = twl4030_madc_bat_get_property;
+ twl4030_madc_bat->psy.external_power_changed =
+ twl4030_madc_bat_ext_changed;
+
+ /* sort charging and discharging calibration data */
+ sort(pdata->charging, pdata->charging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+ sort(pdata->discharging, pdata->discharging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+ power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+
+ return 0;
+}
+
+static int twl4030_madc_battery_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&bat->psy);
+ kfree(bat);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_battery_driver = {
+ .driver = {
+ .name = "twl4030_madc_battery",
+ },
+ .probe = twl4030_madc_battery_probe,
+ .remove = twl4030_madc_battery_remove,
+};
+module_platform_driver(twl4030_madc_battery_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lukas Märdian <lukas@goldelico.com>");
+MODULE_DESCRIPTION("twl4030_madc battery driver");
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index eae0eda9ff3..9966124ad98 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -184,7 +184,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
{
struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
pps_unregister_source(data->pps);
dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9e3498bf302..9654aa3c05c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1249,6 +1249,15 @@ config RTC_DRV_SIRFSOC
Say "yes" here to support the real time clock on SiRF SOC chips.
This driver can also be built as a module called rtc-sirfsoc.
+config RTC_DRV_MOXART
+ tristate "MOXA ART RTC"
+ help
+ If you say yes here you get support for the MOXA ART
+ RTC module.
+
+ This driver can also be built as a module. If so, the module
+	  will be called rtc-moxart.
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d3b4488f48f..2dff3d2009b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -130,3 +130,4 @@ obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
+obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index be06d7150de..24e733c98f8 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1018,23 +1018,6 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
cmos_do_remove(&pnp->dev);
}
-#ifdef CONFIG_PM
-
-static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
-{
- return cmos_suspend(&pnp->dev);
-}
-
-static int cmos_pnp_resume(struct pnp_dev *pnp)
-{
- return cmos_resume(&pnp->dev);
-}
-
-#else
-#define cmos_pnp_suspend NULL
-#define cmos_pnp_resume NULL
-#endif
-
static void cmos_pnp_shutdown(struct pnp_dev *pnp)
{
if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
@@ -1060,8 +1043,11 @@ static struct pnp_driver cmos_pnp_driver = {
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
- .suspend = cmos_pnp_suspend,
- .resume = cmos_pnp_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &cmos_pm_ops,
+ },
+#endif
};
#endif /* CONFIG_PNP */
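The rtc-cmos change above drops the PNP-specific suspend/resume wrappers in favour of a dev_pm_ops table (cmos_pm_ops itself is defined elsewhere in the driver). A minimal sketch of that pattern, with hypothetical foo_* names:

    #include <linux/pm.h>

    /* Both callbacks take a struct device *, so one table serves the
     * PNP and platform bindings alike. */
    static int foo_suspend(struct device *dev) { return 0; }
    static int foo_resume(struct device *dev) { return 0; }

    /* Expands to an empty table when CONFIG_PM_SLEEP is disabled. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);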
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 308a8fefe76..bc7b4fcf603 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -89,7 +89,6 @@ enum ds1511reg {
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
- int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
int alrm_sec;
@@ -479,20 +478,14 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
struct rtc_plat_data *pdata;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
- pdev->name))
- return -EBUSY;
- ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
- if (!ds1511_base)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ds1511_base))
+ return PTR_ERR(ds1511_base);
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
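This is the first of several conversions below to the same idiom; a minimal sketch of it, with a hypothetical foo_probe():

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    /* devm_ioremap_resource() folds the NULL-resource check,
     * request_mem_region() and ioremap() into one devres-managed call
     * and returns an ERR_PTR() on failure. */
    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);  /* res may be NULL */
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... use base ... */
            return 0;
    }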
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 8c6c952e90b..fd31571941f 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -285,19 +285,14 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
- if (!ioaddr)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index eccdc62ae1c..17b73fdc3b6 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -52,11 +52,9 @@
#define RTC_BATT_FLAG 0x80
struct rtc_plat_data {
- struct rtc_device *rtc;
void __iomem *ioaddr_nvram;
void __iomem *ioaddr_rtc;
size_t size_nvram;
- size_t size;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
@@ -117,11 +115,7 @@ static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* year is 1900 + tm->tm_year */
tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900;
- if (rtc_valid_tm(tm) < 0) {
- dev_err(dev, "retrieved date/time is not valid.\n");
- rtc_time_to_tm(0, tm);
- }
- return 0;
+ return rtc_valid_tm(tm);
}
static const struct rtc_class_ops ds1742_rtc_ops = {
@@ -168,22 +162,17 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
- if (!ioaddr)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr_nvram = ioaddr;
- pdata->size_nvram = pdata->size - RTC_SIZE;
+ pdata->size_nvram = resource_size(res) - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
sysfs_bin_attr_init(&pdata->nvram_attr);
@@ -212,7 +201,6 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
&ds1742_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- pdata->rtc = rtc;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 549b3c3792d..580e7b56bde 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -138,17 +138,9 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ep93xx_rtc->mmio_base)
- return -ENXIO;
+ ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ep93xx_rtc->mmio_base))
+ return PTR_ERR(ep93xx_rtc->mmio_base);
pdev->dev.platform_data = ep93xx_rtc;
platform_set_drvdata(pdev, ep93xx_rtc);
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 7273b0139e5..4e2a81854f5 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -23,10 +23,6 @@
#include <linux/iio/iio.h>
#include <linux/rtc.h>
-/* Format: HID-SENSOR-usage_id_in_hex */
-/* Usage ID from spec for Time: 0x2000A0 */
-#define DRIVER_NAME "HID-SENSOR-2000a0" /* must be lowercase */
-
enum hid_time_channel {
CHANNEL_SCAN_INDEX_YEAR,
CHANNEL_SCAN_INDEX_MONTH,
@@ -283,9 +279,11 @@ static int hid_time_probe(struct platform_device *pdev)
"hid-sensor-time", &hid_time_rtc_ops,
THIS_MODULE);
- if (IS_ERR(time_state->rtc)) {
+ if (IS_ERR_OR_NULL(time_state->rtc)) {
+ ret = time_state->rtc ? PTR_ERR(time_state->rtc) : -ENODEV;
+ time_state->rtc = NULL;
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TIME);
dev_err(&pdev->dev, "rtc device register failed!\n");
- return PTR_ERR(time_state->rtc);
}
return ret;
@@ -300,9 +298,19 @@ static int hid_time_remove(struct platform_device *pdev)
return 0;
}
+static struct platform_device_id hid_time_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-2000a0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_time_ids);
+
static struct platform_driver hid_time_platform_driver = {
+ .id_table = hid_time_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_time_probe,
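The hid-sensor-time change above moves the usage-ID name out of .driver.name and into an id_table, so the module name and the matched device names are decoupled. A minimal sketch of the pattern, with a hypothetical device name:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* The driver matches any name listed here while keeping
     * KBUILD_MODNAME as its own driver name. */
    static struct platform_device_id foo_ids[] = {
            { .name = "foo-device" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(platform, foo_ids);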
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d3a8c8e255d..abd7f9091f3 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -375,24 +375,16 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
struct imxdi_dev *imxdi;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL);
if (!imxdi)
return -ENOMEM;
imxdi->pdev = pdev;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- return -EBUSY;
-
- imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (imxdi->ioaddr == NULL)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imxdi->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imxdi->ioaddr))
+ return PTR_ERR(imxdi->ioaddr);
spin_lock_init(&imxdi->irq_lock);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 8276ae94a2a..bfdbcb82d06 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -201,16 +201,9 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct lpc32xx_rtc *rtc;
- resource_size_t size;
int rtcirq;
u32 tmp;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get memory resource\n");
- return -ENOENT;
- }
-
rtcirq = platform_get_irq(pdev, 0);
if (rtcirq < 0 || rtcirq >= NR_IRQS) {
dev_warn(&pdev->dev, "Can't get interrupt resource\n");
@@ -224,19 +217,10 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
}
rtc->irq = rtcirq;
- size = resource_size(res);
-
- if (!devm_request_mem_region(&pdev->dev, res->start, size,
- pdev->name)) {
- dev_err(&pdev->dev, "RTC registers are not free\n");
- return -EBUSY;
- }
-
- rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
- if (!rtc->rtc_base) {
- dev_err(&pdev->dev, "Can't map memory\n");
- return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rtc->rtc_base))
+ return PTR_ERR(rtc->rtc_base);
spin_lock_init(&rtc->lock);
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9915cb96014..9efe118a28b 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -240,9 +240,9 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
alrm->pending = 0;
- ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+ ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+ dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
__func__, __LINE__, ret);
goto out;
}
diff --git a/drivers/rtc/rtc-moxart.c b/drivers/rtc/rtc-moxart.c
new file mode 100644
index 00000000000..c29dee0946e
--- /dev/null
+++ b/drivers/rtc/rtc-moxart.c
@@ -0,0 +1,330 @@
+/*
+ * MOXA ART RTC driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#define GPIO_RTC_RESERVED 0x0C
+#define GPIO_RTC_DATA_SET 0x10
+#define GPIO_RTC_DATA_CLEAR 0x14
+#define GPIO_RTC_PIN_PULL_ENABLE 0x18
+#define GPIO_RTC_PIN_PULL_TYPE 0x1C
+#define GPIO_RTC_INT_ENABLE 0x20
+#define GPIO_RTC_INT_RAW_STATE 0x24
+#define GPIO_RTC_INT_MASKED_STATE 0x28
+#define GPIO_RTC_INT_MASK 0x2C
+#define GPIO_RTC_INT_CLEAR 0x30
+#define GPIO_RTC_INT_TRIGGER 0x34
+#define GPIO_RTC_INT_BOTH 0x38
+#define GPIO_RTC_INT_RISE_NEG 0x3C
+#define GPIO_RTC_BOUNCE_ENABLE 0x40
+#define GPIO_RTC_BOUNCE_PRE_SCALE 0x44
+#define GPIO_RTC_PROTECT_W 0x8E
+#define GPIO_RTC_PROTECT_R 0x8F
+#define GPIO_RTC_YEAR_W 0x8C
+#define GPIO_RTC_YEAR_R 0x8D
+#define GPIO_RTC_DAY_W 0x8A
+#define GPIO_RTC_DAY_R 0x8B
+#define GPIO_RTC_MONTH_W 0x88
+#define GPIO_RTC_MONTH_R 0x89
+#define GPIO_RTC_DATE_W 0x86
+#define GPIO_RTC_DATE_R 0x87
+#define GPIO_RTC_HOURS_W 0x84
+#define GPIO_RTC_HOURS_R 0x85
+#define GPIO_RTC_MINUTES_W 0x82
+#define GPIO_RTC_MINUTES_R 0x83
+#define GPIO_RTC_SECONDS_W 0x80
+#define GPIO_RTC_SECONDS_R 0x81
+#define GPIO_RTC_DELAY_TIME 8
+
+struct moxart_rtc {
+ struct rtc_device *rtc;
+ spinlock_t rtc_lock;
+ int gpio_data, gpio_sclk, gpio_reset;
+};
+
+static int day_of_year[12] = { 0, 31, 59, 90, 120, 151, 181,
+ 212, 243, 273, 304, 334 };
+
+static void moxart_rtc_write_byte(struct device *dev, u8 data)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < 8; i++, data >>= 1) {
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_data, ((data & 1) == 1));
+ udelay(GPIO_RTC_DELAY_TIME);
+ gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ }
+}
+
+static u8 moxart_rtc_read_byte(struct device *dev)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ int i;
+ u8 data = 0;
+
+ for (i = 0; i < 8; i++) {
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+ gpio_set_value(moxart_rtc->gpio_sclk, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ if (gpio_get_value(moxart_rtc->gpio_data))
+ data |= (1 << i);
+ udelay(GPIO_RTC_DELAY_TIME);
+ }
+ return data;
+}
+
+static u8 moxart_rtc_read_register(struct device *dev, u8 cmd)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ u8 data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ gpio_direction_output(moxart_rtc->gpio_data, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ moxart_rtc_write_byte(dev, cmd);
+ gpio_direction_input(moxart_rtc->gpio_data);
+ udelay(GPIO_RTC_DELAY_TIME);
+ data = moxart_rtc_read_byte(dev);
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+
+ local_irq_restore(flags);
+
+ return data;
+}
+
+static void moxart_rtc_write_register(struct device *dev, u8 cmd, u8 data)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ gpio_direction_output(moxart_rtc->gpio_data, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 1);
+ udelay(GPIO_RTC_DELAY_TIME);
+ moxart_rtc_write_byte(dev, cmd);
+ moxart_rtc_write_byte(dev, data);
+ gpio_set_value(moxart_rtc->gpio_sclk, 0);
+ gpio_set_value(moxart_rtc->gpio_reset, 0);
+ udelay(GPIO_RTC_DELAY_TIME);
+
+ local_irq_restore(flags);
+}
+
+static int moxart_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&moxart_rtc->rtc_lock);
+
+ moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0);
+ moxart_rtc_write_register(dev, GPIO_RTC_YEAR_W,
+ (((tm->tm_year - 100) / 10) << 4) |
+ ((tm->tm_year - 100) % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_MONTH_W,
+ (((tm->tm_mon + 1) / 10) << 4) |
+ ((tm->tm_mon + 1) % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_DATE_W,
+ ((tm->tm_mday / 10) << 4) |
+ (tm->tm_mday % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_HOURS_W,
+ ((tm->tm_hour / 10) << 4) |
+ (tm->tm_hour % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_MINUTES_W,
+ ((tm->tm_min / 10) << 4) |
+ (tm->tm_min % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_SECONDS_W,
+ ((tm->tm_sec / 10) << 4) |
+ (tm->tm_sec % 10));
+
+ moxart_rtc_write_register(dev, GPIO_RTC_PROTECT_W, 0x80);
+
+ spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+ dev_dbg(dev, "%s: success tm_year=%d tm_mon=%d\n"
+ "tm_mday=%d tm_hour=%d tm_min=%d tm_sec=%d\n",
+ __func__, tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
+
+static int moxart_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct moxart_rtc *moxart_rtc = dev_get_drvdata(dev);
+ unsigned char v;
+
+ spin_lock_irq(&moxart_rtc->rtc_lock);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_SECONDS_R);
+ tm->tm_sec = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_MINUTES_R);
+ tm->tm_min = (((v & 0x70) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_HOURS_R);
+ if (v & 0x80) { /* 12-hour mode */
+ tm->tm_hour = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+ if (v & 0x20) { /* PM mode */
+ tm->tm_hour += 12;
+ if (tm->tm_hour >= 24)
+ tm->tm_hour = 0;
+ }
+ } else { /* 24-hour mode */
+ tm->tm_hour = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+ }
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_DATE_R);
+ tm->tm_mday = (((v & 0x30) >> 4) * 10) + (v & 0x0F);
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_MONTH_R);
+ tm->tm_mon = (((v & 0x10) >> 4) * 10) + (v & 0x0F);
+ tm->tm_mon--;
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_YEAR_R);
+ tm->tm_year = (((v & 0xF0) >> 4) * 10) + (v & 0x0F);
+ tm->tm_year += 100;
+ if (tm->tm_year <= 69)
+ tm->tm_year += 100;
+
+ v = moxart_rtc_read_register(dev, GPIO_RTC_DAY_R);
+ tm->tm_wday = (v & 0x0f) - 1;
+ tm->tm_yday = day_of_year[tm->tm_mon];
+ tm->tm_yday += (tm->tm_mday - 1);
+ if (tm->tm_mon >= 2) {
+ if (!(tm->tm_year % 4) && (tm->tm_year % 100))
+ tm->tm_yday++;
+ }
+
+ tm->tm_isdst = 0;
+
+ spin_unlock_irq(&moxart_rtc->rtc_lock);
+
+ return 0;
+}
+
+static const struct rtc_class_ops moxart_rtc_ops = {
+ .read_time = moxart_rtc_read_time,
+ .set_time = moxart_rtc_set_time,
+};
+
+static int moxart_rtc_probe(struct platform_device *pdev)
+{
+ struct moxart_rtc *moxart_rtc;
+ int ret = 0;
+
+ moxart_rtc = devm_kzalloc(&pdev->dev, sizeof(*moxart_rtc), GFP_KERNEL);
+ if (!moxart_rtc) {
+ dev_err(&pdev->dev, "devm_kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ moxart_rtc->gpio_data = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-data", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_data)) {
+ dev_err(&pdev->dev, "invalid gpio (data): %d\n",
+ moxart_rtc->gpio_data);
+ return moxart_rtc->gpio_data;
+ }
+
+ moxart_rtc->gpio_sclk = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-sclk", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_sclk)) {
+ dev_err(&pdev->dev, "invalid gpio (sclk): %d\n",
+ moxart_rtc->gpio_sclk);
+ return moxart_rtc->gpio_sclk;
+ }
+
+ moxart_rtc->gpio_reset = of_get_named_gpio(pdev->dev.of_node,
+ "gpio-rtc-reset", 0);
+ if (!gpio_is_valid(moxart_rtc->gpio_reset)) {
+ dev_err(&pdev->dev, "invalid gpio (reset): %d\n",
+ moxart_rtc->gpio_reset);
+ return moxart_rtc->gpio_reset;
+ }
+
+ spin_lock_init(&moxart_rtc->rtc_lock);
+ platform_set_drvdata(pdev, moxart_rtc);
+
+ ret = devm_gpio_request(&pdev->dev, moxart_rtc->gpio_data, "rtc_data");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_data gpio\n");
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_sclk,
+ GPIOF_DIR_OUT, "rtc_sclk");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_sclk gpio\n");
+ return ret;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, moxart_rtc->gpio_reset,
+ GPIOF_DIR_OUT, "rtc_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "can't get rtc_reset gpio\n");
+ return ret;
+ }
+
+ moxart_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &moxart_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(moxart_rtc->rtc)) {
+ dev_err(&pdev->dev, "devm_rtc_device_register failed\n");
+ return PTR_ERR(moxart_rtc->rtc);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id moxart_rtc_match[] = {
+ { .compatible = "moxa,moxart-rtc" },
+ { },
+};
+
+static struct platform_driver moxart_rtc_driver = {
+ .probe = moxart_rtc_probe,
+ .driver = {
+ .name = "moxart-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_rtc_match,
+ },
+};
+module_platform_driver(moxart_rtc_driver);
+
+MODULE_DESCRIPTION("MOXART RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index baab802f215..d536c5962c9 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -221,26 +221,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
- resource_size_t size;
u32 rtc_time;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, size,
- pdev->name))
- return -EBUSY;
-
- pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, size);
- if (!pdata->ioaddr)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
	/* Not all SoCs require a clock. */
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index ab87bacb8f8..50c57264554 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -377,22 +377,16 @@ static int mxc_rtc_probe(struct platform_device *pdev)
unsigned long rate;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->devtype = pdev->id_entry->driver_data;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 22861c5e0c5..248653c74b8 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -99,7 +99,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
if (!timeout)
return ERR_PTR(-EPERM);
- return 0;
+ return NULL;
}
static int nuc900_rtc_bcd2bin(unsigned int timereg,
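The nuc900 fix above restores the kernel's pointer-return convention; a minimal sketch of it, with a hypothetical check_access():

    #include <linux/err.h>
    #include <linux/errno.h>

    /* A pointer-returning function signals failure with ERR_PTR(-E...)
     * and success with NULL or a valid pointer -- never a bare 0 that
     * was really meant as an int. */
    static int *check_access(int ok)
    {
            if (!ok)
                    return ERR_PTR(-EPERM);
            return NULL;    /* success, nothing to hand back */
    }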
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c6ffbaec32a..c7d97ee5932 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,8 @@
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
+#define OMAP_RTC_IRQWAKEEN 0x7c
+
/* OMAP_RTC_CTRL_REG bit fields: */
#define OMAP_RTC_CTRL_SPLIT (1<<7)
#define OMAP_RTC_CTRL_DISABLE (1<<6)
@@ -94,12 +96,21 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+/* OMAP_RTC_IRQWAKEEN bit fields: */
+#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN (1<<1)
+
/* OMAP_RTC_KICKER values */
#define KICK0_VALUE 0x83e70b13
#define KICK1_VALUE 0x95a4f1e0
#define OMAP_RTC_HAS_KICKER 0x1
+/*
+ * A few RTC IP revisions have a special WAKE-EN register to enable
+ * wakeup generation for the alarm event.
+ */
+#define OMAP_RTC_HAS_IRQWAKEEN 0x2
+
static void __iomem *rtc_base;
#define rtc_read(addr) readb(rtc_base + (addr))
@@ -299,12 +310,18 @@ static struct rtc_class_ops omap_rtc_ops = {
static int omap_rtc_alarm;
static int omap_rtc_timer;
-#define OMAP_RTC_DATA_DA830_IDX 1
+#define OMAP_RTC_DATA_AM3352_IDX 1
+#define OMAP_RTC_DATA_DA830_IDX 2
static struct platform_device_id omap_rtc_devtype[] = {
{
.name = DRIVER_NAME,
- }, {
+ },
+ [OMAP_RTC_DATA_AM3352_IDX] = {
+ .name = "am3352-rtc",
+ .driver_data = OMAP_RTC_HAS_KICKER | OMAP_RTC_HAS_IRQWAKEEN,
+ },
+ [OMAP_RTC_DATA_DA830_IDX] = {
.name = "da830-rtc",
.driver_data = OMAP_RTC_HAS_KICKER,
},
@@ -316,6 +333,9 @@ static const struct of_device_id omap_rtc_of_match[] = {
{ .compatible = "ti,da830-rtc",
.data = &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
},
+ { .compatible = "ti,am3352-rtc",
+ .data = &omap_rtc_devtype[OMAP_RTC_DATA_AM3352_IDX],
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
@@ -464,16 +484,28 @@ static u8 irqstat;
static int omap_rtc_suspend(struct device *dev)
{
+ u8 irqwake_stat;
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+
irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
/* FIXME the RTC alarm is not currently acting as a wakeup event
- * source, and in fact this enable() call is just saving a flag
- * that's never used...
+ * source on some platforms, and in fact this enable() call is just
+ * saving a flag that's never used...
*/
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
enable_irq_wake(omap_rtc_alarm);
- else
+
+ if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+ irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+ irqwake_stat |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+ rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+ }
+ } else {
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+ }
/* Disable the clock/module */
pm_runtime_put_sync(dev);
@@ -483,13 +515,25 @@ static int omap_rtc_suspend(struct device *dev)
static int omap_rtc_resume(struct device *dev)
{
+ u8 irqwake_stat;
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
+
/* Enable the clock/module so that we can access the registers */
pm_runtime_get_sync(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
disable_irq_wake(omap_rtc_alarm);
- else
+
+ if (id_entry->driver_data & OMAP_RTC_HAS_IRQWAKEEN) {
+ irqwake_stat = rtc_read(OMAP_RTC_IRQWAKEEN);
+ irqwake_stat &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
+ rtc_write(irqwake_stat, OMAP_RTC_IRQWAKEEN);
+ }
+ } else {
rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+ }
return 0;
}
#endif
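On suspend the alarm wake bit is set and on resume it is cleared again; a condensed sketch of that read-modify-write pair, assuming the driver's rtc_read()/rtc_write() accessors and the register definitions added above (alarm_wake_enable() is a hypothetical helper):

    #include <linux/types.h>

    static void alarm_wake_enable(bool on)
    {
            u8 v = rtc_read(OMAP_RTC_IRQWAKEEN);

            if (on)
                    v |= OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
            else
                    v &= ~OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN;
            rtc_write(v, OMAP_RTC_IRQWAKEEN);
    }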
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index a1fecc8d97f..fffb7d3449d 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -238,6 +238,15 @@ static int palmas_rtc_probe(struct platform_device *pdev)
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_rtc *palmas_rtc = NULL;
int ret;
+ bool enable_bb_charging = false;
+ bool high_bb_charging;
+
+ if (pdev->dev.of_node) {
+ enable_bb_charging = of_property_read_bool(pdev->dev.of_node,
+ "ti,backup-battery-chargeable");
+ high_bb_charging = of_property_read_bool(pdev->dev.of_node,
+ "ti,backup-battery-charge-high-current");
+ }
palmas_rtc = devm_kzalloc(&pdev->dev, sizeof(struct palmas_rtc),
GFP_KERNEL);
@@ -254,6 +263,32 @@ static int palmas_rtc_probe(struct platform_device *pdev)
palmas_rtc->dev = &pdev->dev;
platform_set_drvdata(pdev, palmas_rtc);
+ if (enable_bb_charging) {
+ unsigned reg = PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG;
+
+ if (high_bb_charging)
+ reg = 0;
+
+ ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_BACKUP_BATTERY_CTRL,
+ PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG, reg);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+ PALMAS_BACKUP_BATTERY_CTRL,
+ PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN,
+ PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "BACKUP_BATTERY_CTRL update failed, %d\n", ret);
+ return ret;
+ }
+ }
+
/* Start RTC */
ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
PALMAS_RTC_CTRL_REG_STOP_RTC,
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 205b9f7da1b..1ee514a3972 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -203,11 +203,6 @@ static int pcf2127_probe(struct i2c_client *client,
return 0;
}
-static int pcf2127_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static const struct i2c_device_id pcf2127_id[] = {
{ "pcf2127", 0 },
{ }
@@ -229,7 +224,6 @@ static struct i2c_driver pcf2127_driver = {
.of_match_table = of_match_ptr(pcf2127_of_match),
},
.probe = pcf2127_probe,
- .remove = pcf2127_remove,
.id_table = pcf2127_id,
};
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index aa7ed4b5f7f..63460cf80f1 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -44,6 +44,7 @@ struct sirfsoc_rtc_drv {
struct rtc_device *rtc;
u32 rtc_base;
u32 irq;
+ unsigned irq_wake;
/* Overflow for every 8 years extra time */
u32 overflow_rtc;
#ifdef CONFIG_PM
@@ -355,8 +356,8 @@ static int sirfsoc_rtc_suspend(struct device *dev)
rtcdrv->saved_counter =
sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_CN);
rtcdrv->saved_overflow_rtc = rtcdrv->overflow_rtc;
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(rtcdrv->irq);
+ if (device_may_wakeup(&pdev->dev) && !enable_irq_wake(rtcdrv->irq))
+ rtcdrv->irq_wake = 1;
return 0;
}
@@ -423,8 +424,10 @@ static int sirfsoc_rtc_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
sirfsoc_rtc_thaw(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
+ rtcdrv->irq_wake = 0;
+ }
return 0;
}
@@ -434,8 +437,10 @@ static int sirfsoc_rtc_restore(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sirfsoc_rtc_drv *rtcdrv = platform_get_drvdata(pdev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev) && rtcdrv->irq_wake) {
disable_irq_wake(rtcdrv->irq);
+ rtcdrv->irq_wake = 0;
+ }
return 0;
}
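The sirfsoc change pairs every disable_irq_wake() with a successful enable_irq_wake(); a minimal sketch of the balanced pattern, with hypothetical foo_* names:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    struct foo_drv { int irq; unsigned irq_wake; };

    static int foo_suspend(struct device *dev, struct foo_drv *drv)
    {
            /* record the wake flag only if arming actually succeeded */
            if (device_may_wakeup(dev) && !enable_irq_wake(drv->irq))
                    drv->irq_wake = 1;
            return 0;
    }

    static int foo_resume(struct device *dev, struct foo_drv *drv)
    {
            /* never call disable_irq_wake() unbalanced */
            if (device_may_wakeup(dev) && drv->irq_wake) {
                    disable_irq_wake(drv->irq);
                    drv->irq_wake = 0;
            }
            return 0;
    }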
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index af5e97e3f27..a176ba61468 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -294,19 +294,14 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
void __iomem *ioaddr;
int ret = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
- pdev->name))
- return -EBUSY;
- ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
- if (!ioaddr)
- return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index f9a0677e4e3..4f87234e0de 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -244,9 +244,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
struct resource *res;
int irq, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
@@ -255,13 +252,10 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
- pdata->rtcreg = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!pdata->rtcreg)
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->rtcreg))
+ return PTR_ERR(pdata->rtcreg);
spin_lock_init(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index feca317b33d..92bd22ce676 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -645,7 +645,7 @@ dasd_diag_init(void)
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
@@ -655,7 +655,7 @@ static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 96e52bf7593..f93cc32eb81 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -524,20 +524,20 @@ static const struct file_operations fs3270_fops = {
.llseek = no_llseek,
};
-void fs3270_create_cb(int minor)
+static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
-void fs3270_destroy_cb(int minor)
+static void fs3270_destroy_cb(int minor)
{
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
-struct raw3270_notifier fs3270_notifier =
+static struct raw3270_notifier fs3270_notifier =
{
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3e4fb4e858d..a3aa374799d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -910,12 +910,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1131,7 +1131,7 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index cee69dac3e1..a0f47c83fd6 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1845,17 +1845,17 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
-void tty3270_create_cb(int minor)
+static void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}
-void tty3270_destroy_cb(int minor)
+static void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}
-struct raw3270_notifier tty3270_notifier =
+static struct raw3270_notifier tty3270_notifier =
{
.create = tty3270_create_cb,
.destroy = tty3270_destroy_cb,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 9e5e14686e7..794820a123d 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,8 +30,8 @@
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
-#define TO_USER 0
-#define TO_KERNEL 1
+#define TO_USER 1
+#define TO_KERNEL 0
#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
enum arch_id {
@@ -73,7 +73,7 @@ static struct ipl_parameter_block *ipl_block;
* @count: Size of buffer, which should be copied
* @mode: Either TO_KERNEL or TO_USER
*/
-static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
int offs, blk_num;
static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
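After the swap above, TO_USER is the non-zero value, so mode reads naturally as "copy out to userspace". A condensed sketch of the dispatch, assuming the TO_USER/TO_KERNEL values above (copy_out() is hypothetical):

    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int copy_out(void *dest, const void *src, size_t count, int mode)
    {
            if (mode == TO_USER) {
                    if (copy_to_user((void __user *) dest, src, count))
                            return -EFAULT;
            } else {
                    memcpy(dest, src, count);
            }
            return 0;
    }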
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d4174b82a1a..02300dcfac9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -413,7 +413,7 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
if (special == 1)
reg0 |= 0x400000UL;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2ea6165366b..af2166fa515 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -472,7 +472,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6488a7351a6..7e8346ec9cd 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -38,14 +38,6 @@
#include "acornfb.h"
/*
- * VIDC machines can't do 16 or 32BPP modes.
- */
-#ifdef HAS_VIDC
-#undef FBCON_HAS_CFB16
-#undef FBCON_HAS_CFB32
-#endif
-
-/*
* Default resolution.
* NOTE that it has to be supported in the table towards
* the end of this file.
@@ -106,238 +98,6 @@ static struct vidc_timing current_vidc;
extern unsigned int vram_size; /* set by setup.c */
-#ifdef HAS_VIDC
-
-#define MAX_SIZE 480*1024
-
-/* CTL VIDC Actual
- * 24.000 0 8.000
- * 25.175 0 8.392
- * 36.000 0 12.000
- * 24.000 1 12.000
- * 25.175 1 12.588
- * 24.000 2 16.000
- * 25.175 2 16.783
- * 36.000 1 18.000
- * 24.000 3 24.000
- * 36.000 2 24.000
- * 25.175 3 25.175
- * 36.000 3 36.000
- */
-struct pixclock {
- u_long min_clock;
- u_long max_clock;
- u_int vidc_ctl;
- u_int vid_ctl;
-};
-
-static struct pixclock arc_clocks[] = {
- /* we allow +/-1% on these */
- { 123750, 126250, VIDC_CTRL_DIV3, VID_CTL_24MHz }, /* 8.000MHz */
- { 82500, 84167, VIDC_CTRL_DIV2, VID_CTL_24MHz }, /* 12.000MHz */
- { 61875, 63125, VIDC_CTRL_DIV1_5, VID_CTL_24MHz }, /* 16.000MHz */
- { 41250, 42083, VIDC_CTRL_DIV1, VID_CTL_24MHz }, /* 24.000MHz */
-};
-
-static struct pixclock *
-acornfb_valid_pixrate(struct fb_var_screeninfo *var)
-{
- u_long pixclock = var->pixclock;
- u_int i;
-
- if (!var->pixclock)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(arc_clocks); i++)
- if (pixclock > arc_clocks[i].min_clock &&
- pixclock < arc_clocks[i].max_clock)
- return arc_clocks + i;
-
- return NULL;
-}
-
-/* VIDC Rules:
- * hcr : must be even (interlace, hcr/2 must be even)
- * hswr : must be even
- * hdsr : must be odd
- * hder : must be odd
- *
- * vcr : must be odd
- * vswr : >= 1
- * vdsr : >= 1
- * vder : >= vdsr
- * if interlaced, then hcr/2 must be even
- */
-static void
-acornfb_set_timing(struct fb_var_screeninfo *var)
-{
- struct pixclock *pclk;
- struct vidc_timing vidc;
- u_int horiz_correction;
- u_int sync_len, display_start, display_end, cycle;
- u_int is_interlaced;
- u_int vid_ctl, vidc_ctl;
- u_int bandwidth;
-
- memset(&vidc, 0, sizeof(vidc));
-
- pclk = acornfb_valid_pixrate(var);
- vidc_ctl = pclk->vidc_ctl;
- vid_ctl = pclk->vid_ctl;
-
- bandwidth = var->pixclock * 8 / var->bits_per_pixel;
- /* 25.175, 4bpp = 79.444ns per byte, 317.776ns per word: fifo = 2,6 */
- if (bandwidth > 143500)
- vidc_ctl |= VIDC_CTRL_FIFO_3_7;
- else if (bandwidth > 71750)
- vidc_ctl |= VIDC_CTRL_FIFO_2_6;
- else if (bandwidth > 35875)
- vidc_ctl |= VIDC_CTRL_FIFO_1_5;
- else
- vidc_ctl |= VIDC_CTRL_FIFO_0_4;
-
- switch (var->bits_per_pixel) {
- case 1:
- horiz_correction = 19;
- vidc_ctl |= VIDC_CTRL_1BPP;
- break;
-
- case 2:
- horiz_correction = 11;
- vidc_ctl |= VIDC_CTRL_2BPP;
- break;
-
- case 4:
- horiz_correction = 7;
- vidc_ctl |= VIDC_CTRL_4BPP;
- break;
-
- default:
- case 8:
- horiz_correction = 5;
- vidc_ctl |= VIDC_CTRL_8BPP;
- break;
- }
-
- if (var->sync & FB_SYNC_COMP_HIGH_ACT) /* should be FB_SYNC_COMP */
- vidc_ctl |= VIDC_CTRL_CSYNC;
- else {
- if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
- vid_ctl |= VID_CTL_HS_NHSYNC;
-
- if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
- vid_ctl |= VID_CTL_VS_NVSYNC;
- }
-
- sync_len = var->hsync_len;
- display_start = sync_len + var->left_margin;
- display_end = display_start + var->xres;
- cycle = display_end + var->right_margin;
-
- /* if interlaced, then hcr/2 must be even */
- is_interlaced = (var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED;
-
- if (is_interlaced) {
- vidc_ctl |= VIDC_CTRL_INTERLACE;
- if (cycle & 2) {
- cycle += 2;
- var->right_margin += 2;
- }
- }
-
- vidc.h_cycle = (cycle - 2) / 2;
- vidc.h_sync_width = (sync_len - 2) / 2;
- vidc.h_border_start = (display_start - 1) / 2;
- vidc.h_display_start = (display_start - horiz_correction) / 2;
- vidc.h_display_end = (display_end - horiz_correction) / 2;
- vidc.h_border_end = (display_end - 1) / 2;
- vidc.h_interlace = (vidc.h_cycle + 1) / 2;
-
- sync_len = var->vsync_len;
- display_start = sync_len + var->upper_margin;
- display_end = display_start + var->yres;
- cycle = display_end + var->lower_margin;
-
- if (is_interlaced)
- cycle = (cycle - 3) / 2;
- else
- cycle = cycle - 1;
-
- vidc.v_cycle = cycle;
- vidc.v_sync_width = sync_len - 1;
- vidc.v_border_start = display_start - 1;
- vidc.v_display_start = vidc.v_border_start;
- vidc.v_display_end = display_end - 1;
- vidc.v_border_end = vidc.v_display_end;
-
- if (machine_is_a5k())
- __raw_writeb(vid_ctl, IOEB_VID_CTL);
-
- if (memcmp(&current_vidc, &vidc, sizeof(vidc))) {
- current_vidc = vidc;
-
- vidc_writel(0xe0000000 | vidc_ctl);
- vidc_writel(0x80000000 | (vidc.h_cycle << 14));
- vidc_writel(0x84000000 | (vidc.h_sync_width << 14));
- vidc_writel(0x88000000 | (vidc.h_border_start << 14));
- vidc_writel(0x8c000000 | (vidc.h_display_start << 14));
- vidc_writel(0x90000000 | (vidc.h_display_end << 14));
- vidc_writel(0x94000000 | (vidc.h_border_end << 14));
- vidc_writel(0x98000000);
- vidc_writel(0x9c000000 | (vidc.h_interlace << 14));
- vidc_writel(0xa0000000 | (vidc.v_cycle << 14));
- vidc_writel(0xa4000000 | (vidc.v_sync_width << 14));
- vidc_writel(0xa8000000 | (vidc.v_border_start << 14));
- vidc_writel(0xac000000 | (vidc.v_display_start << 14));
- vidc_writel(0xb0000000 | (vidc.v_display_end << 14));
- vidc_writel(0xb4000000 | (vidc.v_border_end << 14));
- vidc_writel(0xb8000000);
- vidc_writel(0xbc000000);
- }
-#ifdef DEBUG_MODE_SELECTION
- printk(KERN_DEBUG "VIDC registers for %dx%dx%d:\n", var->xres,
- var->yres, var->bits_per_pixel);
- printk(KERN_DEBUG " H-cycle : %d\n", vidc.h_cycle);
- printk(KERN_DEBUG " H-sync-width : %d\n", vidc.h_sync_width);
- printk(KERN_DEBUG " H-border-start : %d\n", vidc.h_border_start);
- printk(KERN_DEBUG " H-display-start : %d\n", vidc.h_display_start);
- printk(KERN_DEBUG " H-display-end : %d\n", vidc.h_display_end);
- printk(KERN_DEBUG " H-border-end : %d\n", vidc.h_border_end);
- printk(KERN_DEBUG " H-interlace : %d\n", vidc.h_interlace);
- printk(KERN_DEBUG " V-cycle : %d\n", vidc.v_cycle);
- printk(KERN_DEBUG " V-sync-width : %d\n", vidc.v_sync_width);
- printk(KERN_DEBUG " V-border-start : %d\n", vidc.v_border_start);
- printk(KERN_DEBUG " V-display-start : %d\n", vidc.v_display_start);
- printk(KERN_DEBUG " V-display-end : %d\n", vidc.v_display_end);
- printk(KERN_DEBUG " V-border-end : %d\n", vidc.v_border_end);
- printk(KERN_DEBUG " VIDC Ctrl (E) : 0x%08X\n", vidc_ctl);
- printk(KERN_DEBUG " IOEB Ctrl : 0x%08X\n", vid_ctl);
-#endif
-}
-
-static int
-acornfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int trans, struct fb_info *info)
-{
- union palette pal;
-
- if (regno >= current_par.palette_size)
- return 1;
-
- pal.p = 0;
- pal.vidc.reg = regno;
- pal.vidc.red = red >> 12;
- pal.vidc.green = green >> 12;
- pal.vidc.blue = blue >> 12;
-
- current_par.palette[regno] = pal;
-
- vidc_writel(pal.p);
-
- return 0;
-}
-#endif
-
#ifdef HAS_VIDC20
#include <mach/acornfb.h>
@@ -634,16 +394,7 @@ acornfb_adjust_timing(struct fb_info *info, struct fb_var_screeninfo *var, u_int
/* hsync_len must be even */
var->hsync_len = (var->hsync_len + 1) & ~1;
-#ifdef HAS_VIDC
- /* left_margin must be odd */
- if ((var->left_margin & 1) == 0) {
- var->left_margin -= 1;
- var->right_margin += 1;
- }
-
- /* right_margin must be odd */
- var->right_margin |= 1;
-#elif defined(HAS_VIDC20)
+#if defined(HAS_VIDC20)
/* left_margin must be even */
if (var->left_margin & 1) {
var->left_margin += 1;
@@ -787,11 +538,7 @@ static int acornfb_set_par(struct fb_info *info)
break;
case 8:
current_par.palette_size = VIDC_PALETTE_SIZE;
-#ifdef HAS_VIDC
- info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
-#else
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
-#endif
break;
#ifdef HAS_VIDC20
case 16:
@@ -971,9 +718,6 @@ static void acornfb_init_fbinfo(void)
#if defined(HAS_VIDC20)
fb_info.var.red.length = 8;
fb_info.var.transp.length = 4;
-#elif defined(HAS_VIDC)
- fb_info.var.red.length = 4;
- fb_info.var.transp.length = 1;
#endif
fb_info.var.green = fb_info.var.red;
fb_info.var.blue = fb_info.var.red;
@@ -1310,14 +1054,6 @@ static int acornfb_probe(struct platform_device *dev)
fb_info.fix.smem_start = handle;
}
#endif
-#if defined(HAS_VIDC)
- /*
- * Archimedes/A5000 machines use a fixed address for their
- * framebuffers. Free unused pages
- */
- free_unused_pages(PAGE_OFFSET + size, PAGE_OFFSET + MAX_SIZE);
-#endif
-
fb_info.fix.smem_len = size;
current_par.palette_size = VIDC_PALETTE_SIZE;
diff --git a/drivers/video/acornfb.h b/drivers/video/acornfb.h
index fb2a7fffe50..175c8ff3367 100644
--- a/drivers/video/acornfb.h
+++ b/drivers/video/acornfb.h
@@ -13,10 +13,6 @@
#include <asm/hardware/iomd.h>
#define VIDC_PALETTE_SIZE 256
#define VIDC_NAME "VIDC20"
-#elif defined(HAS_VIDC)
-#include <asm/hardware/memc.h>
-#define VIDC_PALETTE_SIZE 16
-#define VIDC_NAME "VIDC"
#endif
#define EXTEND8(x) ((x)|(x)<<8)
@@ -101,31 +97,6 @@ struct modex_params {
const struct modey_params *modey;
};
-#ifdef HAS_VIDC
-
-#define VID_CTL_VS_NVSYNC (1 << 3)
-#define VID_CTL_HS_NHSYNC (1 << 2)
-#define VID_CTL_24MHz (0)
-#define VID_CTL_25MHz (1)
-#define VID_CTL_36MHz (2)
-
-#define VIDC_CTRL_CSYNC (1 << 7)
-#define VIDC_CTRL_INTERLACE (1 << 6)
-#define VIDC_CTRL_FIFO_0_4 (0 << 4)
-#define VIDC_CTRL_FIFO_1_5 (1 << 4)
-#define VIDC_CTRL_FIFO_2_6 (2 << 4)
-#define VIDC_CTRL_FIFO_3_7 (3 << 4)
-#define VIDC_CTRL_1BPP (0 << 2)
-#define VIDC_CTRL_2BPP (1 << 2)
-#define VIDC_CTRL_4BPP (2 << 2)
-#define VIDC_CTRL_8BPP (3 << 2)
-#define VIDC_CTRL_DIV3 (0 << 0)
-#define VIDC_CTRL_DIV2 (1 << 0)
-#define VIDC_CTRL_DIV1_5 (2 << 0)
-#define VIDC_CTRL_DIV1 (3 << 0)
-
-#endif
-
#ifdef HAS_VIDC20
/*
* VIDC20 registers
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index dbfe2c18a43..b269abd932a 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -952,7 +952,7 @@ static struct fb_ops ps3fb_ops = {
.fb_compat_ioctl = ps3fb_ioctl
};
-static struct fb_fix_screeninfo ps3fb_fix __initdata = {
+static struct fb_fix_screeninfo ps3fb_fix = {
.id = DEVICE_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 47e12cfc2a5..15c7251b055 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -152,8 +152,6 @@ static int mxc_w1_remove(struct platform_device *pdev)
clk_disable_unprepare(mdev->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 22013ca2119..c7c64f18773 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -234,9 +234,11 @@ static ssize_t w1_master_attribute_store_search(struct device * dev,
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
+ int ret;
- if (strict_strtol(buf, 0, &tmp) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &tmp);
+ if (ret)
+ return ret;
mutex_lock(&md->mutex);
md->search_count = tmp;
@@ -266,9 +268,11 @@ static ssize_t w1_master_attribute_store_pullup(struct device *dev,
{
long tmp;
struct w1_master *md = dev_to_w1_master(dev);
+ int ret;
- if (strict_strtol(buf, 0, &tmp) == -EINVAL)
- return -EINVAL;
+ ret = kstrtol(buf, 0, &tmp);
+ if (ret)
+ return ret;
mutex_lock(&md->mutex);
md->enable_pullup = tmp;
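kstrtol() returns a distinct errno (-EINVAL or -ERANGE) rather than the single -EINVAL that strict_strtol() folded everything into, which is why the return value is now propagated. A minimal sketch of the pattern, with a hypothetical store_value():

    #include <linux/kernel.h>

    static int store_value(const char *buf, long *out)
    {
            long tmp;
            int ret;

            ret = kstrtol(buf, 0, &tmp);    /* -EINVAL or -ERANGE on error */
            if (ret)
                    return ret;
            *out = tmp;
            return 0;
    }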
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index de7e4f49722..5be5e3d14f7 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -162,7 +162,8 @@ extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
#define HPWDT_ARCH 32
asm(".text \n\t"
- ".align 4 \n"
+ ".align 4 \n\t"
+ ".globl asminline_call \n"
"asminline_call: \n\t"
"pushl %ebp \n\t"
"movl %esp, %ebp \n\t"
@@ -352,7 +353,8 @@ static int detect_cru_service(void)
#define HPWDT_ARCH 64
asm(".text \n\t"
- ".align 4 \n"
+ ".align 4 \n\t"
+ ".globl asminline_call \n"
"asminline_call: \n\t"
"pushq %rbp \n\t"
"movq %rsp, %rbp \n\t"
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index d384a8b77ee..aa5ecf479a5 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -183,7 +183,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
else
flock.length = fl->fl_end - fl->fl_start + 1;
flock.proc_id = fl->fl_pid;
- flock.client_id = utsname()->nodename;
+ flock.client_id = fid->clnt->name;
if (IS_SETLKW(cmd))
flock.flags = P9_LOCK_FLAGS_BLOCK;
@@ -260,7 +260,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
else
glock.length = fl->fl_end - fl->fl_start + 1;
glock.proc_id = fl->fl_pid;
- glock.client_id = utsname()->nodename;
+ glock.client_id = fid->clnt->name;
res = p9_client_getlock_dotl(fid, &glock);
if (res < 0)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 25b018efb8a..94de6d1482e 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -146,7 +146,7 @@ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
char type = 0, ext[32];
int major = -1, minor = -1;
- strncpy(ext, stat->extension, sizeof(ext));
+ strlcpy(ext, stat->extension, sizeof(ext));
sscanf(ext, "%c %u %u", &type, &major, &minor);
switch (type) {
case 'c':
@@ -1186,7 +1186,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
* this even with .u extension. So check
* for non NULL stat->extension
*/
- strncpy(ext, stat->extension, sizeof(ext));
+ strlcpy(ext, stat->extension, sizeof(ext));
/* HARDLINKCOUNT %u */
sscanf(ext, "%13s %u", tag_name, &i_nlink);
if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
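strlcpy() always NUL-terminates, while strncpy() leaves the buffer unterminated when the source fills it, which would let the sscanf() above run past ext[]. A standalone sketch of the difference (bsd_strlcpy() is a minimal stand-in for the kernel's strlcpy()):

    #include <stdio.h>
    #include <string.h>

    static size_t bsd_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n);
                    dst[n] = '\0';  /* always terminated */
            }
            return len;     /* total length, for truncation checks */
    }

    int main(void)
    {
            char ext[8];

            bsd_strlcpy(ext, "c 254 17", sizeof(ext));
            printf("%s\n", ext);    /* "c 254 1", safely truncated */
            return 0;
    }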
diff --git a/fs/affs/file.c b/fs/affs/file.c
index af3261b7810..776e3935a75 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -836,7 +836,7 @@ affs_truncate(struct inode *inode)
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
- u32 size = inode->i_size;
+ loff_t size = inode->i_size;
int res;
res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0b74d3176ab..646337dc520 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -751,10 +751,6 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
_enter("{%x:%u},{%s},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
- ret = -ENAMETOOLONG;
- if (dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
@@ -816,10 +812,6 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
_enter("{%x:%u},{%s}",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
- ret = -ENAMETOOLONG;
- if (dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
@@ -936,10 +928,6 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
_enter("{%x:%u},{%s},%ho,",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
- ret = -ENAMETOOLONG;
- if (dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
@@ -1005,10 +993,6 @@ static int afs_link(struct dentry *from, struct inode *dir,
dvnode->fid.vid, dvnode->fid.vnode,
dentry->d_name.name);
- ret = -ENAMETOOLONG;
- if (dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
@@ -1053,10 +1037,6 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name,
content);
- ret = -ENAMETOOLONG;
- if (dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
ret = -EINVAL;
if (strlen(content) >= AFSPATHMAX)
goto error;
@@ -1127,10 +1107,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dvnode->fid.vid, new_dvnode->fid.vnode,
new_dentry->d_name.name);
- ret = -ENAMETOOLONG;
- if (new_dentry->d_name.len >= AFSNAMEMAX)
- goto error;
-
key = afs_request_key(orig_dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 743c7c2c949..0f00da329e7 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -183,13 +183,14 @@ static int autofs_dev_ioctl_protosubver(struct file *fp,
return 0;
}
+/* Find the topmost mount satisfying test() */
static int find_autofs_mount(const char *pathname,
struct path *res,
int test(struct path *path, void *data),
void *data)
{
struct path path;
- int err = kern_path(pathname, 0, &path);
+ int err = kern_path_mountpoint(AT_FDCWD, pathname, &path, 0);
if (err)
return err;
err = -ENOENT;
@@ -197,10 +198,9 @@ static int find_autofs_mount(const char *pathname,
if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) {
if (test(&path, data)) {
path_get(&path);
- if (!err) /* already found some */
- path_put(res);
*res = path;
err = 0;
+ break;
}
}
if (!follow_up(&path))
@@ -486,12 +486,11 @@ static int autofs_dev_ioctl_askumount(struct file *fp,
* mount if there is one or 0 if it isn't a mountpoint.
*
* If we aren't supplied with a file descriptor then we
- * lookup the nameidata of the path and check if it is the
- * root of a mount. If a type is given we are looking for
- * a particular autofs mount and if we don't find a match
- * we return fail. If the located nameidata path is the
- * root of a mount we return 1 along with the super magic
- * of the mount or 0 otherwise.
+ * lookup the path and check if it is the root of a mount.
+ * If a type is given we are looking for a particular autofs
+ * mount and if we don't find a match we return fail. If the
+ * located path is the root of a mount we return 1 along with
+ * the super magic of the mount or 0 otherwise.
*
* In both cases the the device number (as returned by
* new_encode_dev()) is also returned.
@@ -519,9 +518,11 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
if (!fp || param->ioctlfd == -1) {
if (autofs_type_any(type))
- err = kern_path(name, LOOKUP_FOLLOW, &path);
+ err = kern_path_mountpoint(AT_FDCWD,
+ name, &path, LOOKUP_FOLLOW);
else
- err = find_autofs_mount(name, &path, test_by_type, &type);
+ err = find_autofs_mount(name, &path,
+ test_by_type, &type);
if (err)
goto out;
devid = new_encode_dev(path.dentry->d_sb->s_dev);
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 8fb42916d8a..60250847929 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -716,13 +716,14 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
return 0;
bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
-
- bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
- if (!bs->bvec_integrity_pool)
+ if (!bs->bio_integrity_pool)
return -1;
- if (!bs->bio_integrity_pool)
+ bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
+ if (!bs->bvec_integrity_pool) {
+ mempool_destroy(bs->bio_integrity_pool);
return -1;
+ }
return 0;
}
diff --git a/fs/coredump.c b/fs/coredump.c
index 72f816d6cad..9bdeca12ae0 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -190,6 +190,11 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
err = cn_printf(cn, "%d",
task_tgid_vnr(current));
break;
+ /* global pid */
+ case 'P':
+ err = cn_printf(cn, "%d",
+ task_tgid_nr(current));
+ break;
/* uid */
case 'u':
err = cn_printf(cn, "%d", cred->uid);
diff --git a/fs/dcache.c b/fs/dcache.c
index ca8e9cd60f8..4d9df3c940e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -88,6 +88,35 @@ EXPORT_SYMBOL(rename_lock);
static struct kmem_cache *dentry_cache __read_mostly;
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * lock: sequence lock
+ * seq : sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ write_seqlock(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ write_sequnlock(lock);
+}
+
/*
* This is the single most critical data structure when it comes
* to the dcache: the hashtable for lookups. Somebody should try
@@ -1012,7 +1041,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
* the parenthood after dropping the lock and check
* that the sequence number still matches.
*/
-static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
+static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
{
struct dentry *new = old->d_parent;
@@ -1026,7 +1055,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
*/
if (new != old->d_parent ||
(old->d_flags & DCACHE_DENTRY_KILLED) ||
- (!locked && read_seqretry(&rename_lock, seq))) {
+ need_seqretry(&rename_lock, seq)) {
spin_unlock(&new->d_lock);
new = NULL;
}
@@ -1063,13 +1092,12 @@ static void d_walk(struct dentry *parent, void *data,
{
struct dentry *this_parent;
struct list_head *next;
- unsigned seq;
- int locked = 0;
+ unsigned seq = 0;
enum d_walk_ret ret;
bool retry = true;
- seq = read_seqbegin(&rename_lock);
again:
+ read_seqbegin_or_lock(&rename_lock, &seq);
this_parent = parent;
spin_lock(&this_parent->d_lock);
@@ -1123,13 +1151,13 @@ resume:
*/
if (this_parent != parent) {
struct dentry *child = this_parent;
- this_parent = try_to_ascend(this_parent, locked, seq);
+ this_parent = try_to_ascend(this_parent, seq);
if (!this_parent)
goto rename_retry;
next = child->d_u.d_child.next;
goto resume;
}
- if (!locked && read_seqretry(&rename_lock, seq)) {
+ if (need_seqretry(&rename_lock, seq)) {
spin_unlock(&this_parent->d_lock);
goto rename_retry;
}
@@ -1138,17 +1166,13 @@ resume:
out_unlock:
spin_unlock(&this_parent->d_lock);
- if (locked)
- write_sequnlock(&rename_lock);
+ done_seqretry(&rename_lock, seq);
return;
rename_retry:
if (!retry)
return;
- if (locked)
- goto again;
- locked = 1;
- write_seqlock(&rename_lock);
+ seq = 1;
goto again;
}
@@ -2647,9 +2671,39 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
return 0;
}
+/**
+ * prepend_name - prepend a pathname in front of current buffer pointer
+ * @buffer: buffer pointer
+ * @buflen: allocated length of the buffer
+ * @name: name string and length qstr structure
+ *
+ * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
+ * make sure that either the old or the new name pointer and length are
+ * fetched. However, the length and the pointer may not match, so the
+ * length cannot be trusted: we copy byte-by-byte until the length is
+ * reached or a null byte is found. "/" is also prepended at the beginning
+ * of the name. The sequence number check at the caller will retry when a
+ * d_move() does happen, so any garbage in the buffer due to a mismatched
+ * pointer and length will be discarded.
+ */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
- return prepend(buffer, buflen, name->name, name->len);
+ const char *dname = ACCESS_ONCE(name->name);
+ u32 dlen = ACCESS_ONCE(name->len);
+ char *p;
+
+ if (*buflen < dlen + 1)
+ return -ENAMETOOLONG;
+ *buflen -= dlen + 1;
+ p = *buffer -= dlen + 1;
+ *p++ = '/';
+ while (dlen--) {
+ char c = *dname++;
+ if (!c)
+ break;
+ *p++ = c;
+ }
+ return 0;
}
/**
@@ -2659,7 +2713,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
* @buffer: pointer to the end of the buffer
* @buflen: pointer to buffer length
*
- * Caller holds the rename_lock.
+ * The function tries to write out the pathname without taking any lock other
+ * than the RCU read lock to make sure that dentries won't go away. It only
+ * checks the sequence number of the global rename_lock as any change in the
+ * dentry's d_seq will be preceded by changes in the rename_lock sequence
+ * number. If the sequence number has changed, it restarts the whole
+ * pathname back-tracing sequence: the first pass is attempted locklessly,
+ * and on a sequence mismatch the walk falls back to retrying with the
+ * rename_lock held.
*/
static int prepend_path(const struct path *path,
const struct path *root,
@@ -2668,54 +2729,66 @@ static int prepend_path(const struct path *path,
struct dentry *dentry = path->dentry;
struct vfsmount *vfsmnt = path->mnt;
struct mount *mnt = real_mount(vfsmnt);
- bool slash = false;
int error = 0;
+ unsigned seq = 0;
+ char *bptr;
+ int blen;
+ rcu_read_lock();
+restart:
+ bptr = *buffer;
+ blen = *buflen;
+ read_seqbegin_or_lock(&rename_lock, &seq);
while (dentry != root->dentry || vfsmnt != root->mnt) {
struct dentry * parent;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
/* Global root? */
- if (!mnt_has_parent(mnt))
- goto global_root;
- dentry = mnt->mnt_mountpoint;
- mnt = mnt->mnt_parent;
- vfsmnt = &mnt->mnt;
- continue;
+ if (mnt_has_parent(mnt)) {
+ dentry = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
+ vfsmnt = &mnt->mnt;
+ continue;
+ }
+ /*
+ * Filesystems needing to implement special "root names"
+ * should do so with ->d_dname()
+ */
+ if (IS_ROOT(dentry) &&
+ (dentry->d_name.len != 1 ||
+ dentry->d_name.name[0] != '/')) {
+ WARN(1, "Root dentry has weird name <%.*s>\n",
+ (int) dentry->d_name.len,
+ dentry->d_name.name);
+ }
+ if (!error)
+ error = is_mounted(vfsmnt) ? 1 : 2;
+ break;
}
parent = dentry->d_parent;
prefetch(parent);
- spin_lock(&dentry->d_lock);
- error = prepend_name(buffer, buflen, &dentry->d_name);
- spin_unlock(&dentry->d_lock);
- if (!error)
- error = prepend(buffer, buflen, "/", 1);
+ error = prepend_name(&bptr, &blen, &dentry->d_name);
if (error)
break;
- slash = true;
dentry = parent;
}
+ if (!(seq & 1))
+ rcu_read_unlock();
+ if (need_seqretry(&rename_lock, seq)) {
+ seq = 1;
+ goto restart;
+ }
+ done_seqretry(&rename_lock, seq);
- if (!error && !slash)
- error = prepend(buffer, buflen, "/", 1);
-
- return error;
-
-global_root:
- /*
- * Filesystems needing to implement special "root names"
- * should do so with ->d_dname()
- */
- if (IS_ROOT(dentry) &&
- (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
- WARN(1, "Root dentry has weird name <%.*s>\n",
- (int) dentry->d_name.len, dentry->d_name.name);
- }
- if (!slash)
- error = prepend(buffer, buflen, "/", 1);
- if (!error)
- error = is_mounted(vfsmnt) ? 1 : 2;
+ if (error >= 0 && bptr == *buffer) {
+ if (--blen < 0)
+ error = -ENAMETOOLONG;
+ else
+ *--bptr = '/';
+ }
+ *buffer = bptr;
+ *buflen = blen;
return error;
}
@@ -2744,9 +2817,7 @@ char *__d_path(const struct path *path,
prepend(&res, &buflen, "\0", 1);
br_read_lock(&vfsmount_lock);
- write_seqlock(&rename_lock);
error = prepend_path(path, root, &res, &buflen);
- write_sequnlock(&rename_lock);
br_read_unlock(&vfsmount_lock);
if (error < 0)
@@ -2765,9 +2836,7 @@ char *d_absolute_path(const struct path *path,
prepend(&res, &buflen, "\0", 1);
br_read_lock(&vfsmount_lock);
- write_seqlock(&rename_lock);
error = prepend_path(path, &root, &res, &buflen);
- write_sequnlock(&rename_lock);
br_read_unlock(&vfsmount_lock);
if (error > 1)
@@ -2833,9 +2902,7 @@ char *d_path(const struct path *path, char *buf, int buflen)
get_fs_root(current->fs, &root);
br_read_lock(&vfsmount_lock);
- write_seqlock(&rename_lock);
error = path_with_deleted(path, &root, &res, &buflen);
- write_sequnlock(&rename_lock);
br_read_unlock(&vfsmount_lock);
if (error < 0)
res = ERR_PTR(error);
@@ -2870,10 +2937,10 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
char *end = buffer + buflen;
/* these dentries are never renamed, so d_lock is not needed */
if (prepend(&end, &buflen, " (deleted)", 11) ||
- prepend_name(&end, &buflen, &dentry->d_name) ||
+ prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
prepend(&end, &buflen, "/", 1))
end = ERR_PTR(-ENAMETOOLONG);
- return end;
+ return end;
}
/*
@@ -2881,30 +2948,42 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
*/
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
- char *end = buf + buflen;
- char *retval;
+ char *end, *retval;
+ int len, seq = 0;
+ int error = 0;
- prepend(&end, &buflen, "\0", 1);
+ rcu_read_lock();
+restart:
+ end = buf + buflen;
+ len = buflen;
+ prepend(&end, &len, "\0", 1);
if (buflen < 1)
goto Elong;
/* Get '/' right */
retval = end-1;
*retval = '/';
-
+ read_seqbegin_or_lock(&rename_lock, &seq);
while (!IS_ROOT(dentry)) {
struct dentry *parent = dentry->d_parent;
int error;
prefetch(parent);
- spin_lock(&dentry->d_lock);
- error = prepend_name(&end, &buflen, &dentry->d_name);
- spin_unlock(&dentry->d_lock);
- if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
- goto Elong;
+ error = prepend_name(&end, &len, &dentry->d_name);
+ if (error)
+ break;
retval = end;
dentry = parent;
}
+ if (!(seq & 1))
+ rcu_read_unlock();
+ if (need_seqretry(&rename_lock, seq)) {
+ seq = 1;
+ goto restart;
+ }
+ done_seqretry(&rename_lock, seq);
+ if (error)
+ goto Elong;
return retval;
Elong:
return ERR_PTR(-ENAMETOOLONG);
@@ -2912,13 +2991,7 @@ Elong:
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
- char *retval;
-
- write_seqlock(&rename_lock);
- retval = __dentry_path(dentry, buf, buflen);
- write_sequnlock(&rename_lock);
-
- return retval;
+ return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
@@ -2927,7 +3000,6 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
char *p = NULL;
char *retval;
- write_seqlock(&rename_lock);
if (d_unlinked(dentry)) {
p = buf + buflen;
if (prepend(&p, &buflen, "//deleted", 10) != 0)
@@ -2935,7 +3007,6 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
buflen++;
}
retval = __dentry_path(dentry, buf, buflen);
- write_sequnlock(&rename_lock);
if (!IS_ERR(retval) && p)
 		*p = '/'; /* restore '/' overridden with '\0' */
return retval;
@@ -2974,7 +3045,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
error = -ENOENT;
br_read_lock(&vfsmount_lock);
- write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
char *cwd = page + PAGE_SIZE;
@@ -2982,7 +3052,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
prepend(&cwd, &buflen, "\0", 1);
error = prepend_path(&pwd, &root, &cwd, &buflen);
- write_sequnlock(&rename_lock);
br_read_unlock(&vfsmount_lock);
if (error < 0)
@@ -3003,7 +3072,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
error = -EFAULT;
}
} else {
- write_sequnlock(&rename_lock);
br_read_unlock(&vfsmount_lock);
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 293f86741dd..473e09da7d0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -740,6 +740,7 @@ static void ep_free(struct eventpoll *ep)
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
+ cond_resched();
}
/*
@@ -754,6 +755,7 @@ static void ep_free(struct eventpoll *ep)
while ((rbp = rb_first(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
+ cond_resched();
}
mutex_unlock(&ep->mtx);
diff --git a/fs/exec.c b/fs/exec.c
index fd774c7cb48..8875dd10ae7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -74,6 +74,8 @@ static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
BUG_ON(!fmt);
+ if (WARN_ON(!fmt->load_binary))
+ return;
write_lock(&binfmt_lock);
insert ? list_add(&fmt->lh, &formats) :
list_add_tail(&fmt->lh, &formats);
@@ -266,7 +268,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
- vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+ vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
@@ -1365,18 +1367,18 @@ out:
}
EXPORT_SYMBOL(remove_arg_zero);
+#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
* cycle the list of binary formats handler, until one recognizes the image
*/
int search_binary_handler(struct linux_binprm *bprm)
{
- unsigned int depth = bprm->recursion_depth;
- int try,retval;
+ bool need_retry = IS_ENABLED(CONFIG_MODULES);
struct linux_binfmt *fmt;
- pid_t old_pid, old_vpid;
+ int retval;
/* This allows 4 levels of binfmt rewrites before failing hard. */
- if (depth > 5)
+ if (bprm->recursion_depth > 5)
return -ELOOP;
retval = security_bprm_check(bprm);
@@ -1387,71 +1389,67 @@ int search_binary_handler(struct linux_binprm *bprm)
if (retval)
return retval;
+ retval = -ENOENT;
+ retry:
+ read_lock(&binfmt_lock);
+ list_for_each_entry(fmt, &formats, lh) {
+ if (!try_module_get(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
+ bprm->recursion_depth++;
+ retval = fmt->load_binary(bprm);
+ bprm->recursion_depth--;
+ if (retval >= 0 || retval != -ENOEXEC ||
+ bprm->mm == NULL || bprm->file == NULL) {
+ put_binfmt(fmt);
+ return retval;
+ }
+ read_lock(&binfmt_lock);
+ put_binfmt(fmt);
+ }
+ read_unlock(&binfmt_lock);
+
+ if (need_retry && retval == -ENOEXEC) {
+ if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
+ printable(bprm->buf[2]) && printable(bprm->buf[3]))
+ return retval;
+ if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
+ return retval;
+ need_retry = false;
+ goto retry;
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(search_binary_handler);
+
+static int exec_binprm(struct linux_binprm *bprm)
+{
+ pid_t old_pid, old_vpid;
+ int ret;
+
/* Need to fetch pid before load_binary changes it */
old_pid = current->pid;
rcu_read_lock();
old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
- retval = -ENOENT;
- for (try=0; try<2; try++) {
- read_lock(&binfmt_lock);
- list_for_each_entry(fmt, &formats, lh) {
- int (*fn)(struct linux_binprm *) = fmt->load_binary;
- if (!fn)
- continue;
- if (!try_module_get(fmt->module))
- continue;
- read_unlock(&binfmt_lock);
- bprm->recursion_depth = depth + 1;
- retval = fn(bprm);
- bprm->recursion_depth = depth;
- if (retval >= 0) {
- if (depth == 0) {
- trace_sched_process_exec(current, old_pid, bprm);
- ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
- }
- put_binfmt(fmt);
- allow_write_access(bprm->file);
- if (bprm->file)
- fput(bprm->file);
- bprm->file = NULL;
- current->did_exec = 1;
- proc_exec_connector(current);
- return retval;
- }
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
- if (retval != -ENOEXEC || bprm->mm == NULL)
- break;
- if (!bprm->file) {
- read_unlock(&binfmt_lock);
- return retval;
- }
+ ret = search_binary_handler(bprm);
+ if (ret >= 0) {
+ trace_sched_process_exec(current, old_pid, bprm);
+ ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
+ current->did_exec = 1;
+ proc_exec_connector(current);
+
+ if (bprm->file) {
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL; /* to catch use-after-free */
}
- read_unlock(&binfmt_lock);
-#ifdef CONFIG_MODULES
- if (retval != -ENOEXEC || bprm->mm == NULL) {
- break;
- } else {
-#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
- if (printable(bprm->buf[0]) &&
- printable(bprm->buf[1]) &&
- printable(bprm->buf[2]) &&
- printable(bprm->buf[3]))
- break; /* -ENOEXEC */
- if (try)
- break; /* -ENOEXEC */
- request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
- }
-#else
- break;
-#endif
}
- return retval;
-}
-EXPORT_SYMBOL(search_binary_handler);
+ return ret;
+}
/*
* sys_execve() executes a new program.
@@ -1541,7 +1539,7 @@ static int do_execve_common(const char *filename,
if (retval < 0)
goto out;
- retval = search_binary_handler(bprm);
+ retval = exec_binprm(bprm);
if (retval < 0)
goto out;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 293bc2e47a7..a235f001688 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -231,7 +231,7 @@ static int filldir_one(void * __buf, const char * name, int len,
int result = 0;
buf->sequence++;
- if (buf->ino == ino) {
+ if (buf->ino == ino && len <= NAME_MAX) {
memcpy(buf->name, name, len);
buf->name[len] = '\0';
buf->found = 1;
diff --git a/fs/file_table.c b/fs/file_table.c
index 322cd37626c..abdd15ad13c 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -311,8 +311,7 @@ void fput(struct file *file)
return;
/*
* After this task has run exit_task_work(),
- * task_work_add() will fail. free_ipc_ns()->
- * shm_destroy() can do this. Fall through to delayed
+ * task_work_add() will fail. Fall through to delayed
* fput to avoid leaking *file.
*/
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 68851ff2fd4..30f6f27d5a5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -723,7 +723,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
return wrote;
}
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
enum wb_reason reason)
{
struct wb_writeback_work work = {
@@ -1049,10 +1049,8 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
struct backing_dev_info *bdi;
- if (!nr_pages) {
- nr_pages = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS);
- }
+ if (!nr_pages)
+ nr_pages = get_nr_dirty_pages();
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
@@ -1173,6 +1171,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
bool wakeup_bdi = false;
bdi = inode_to_bdi(inode);
+ spin_unlock(&inode->i_lock);
+ spin_lock(&bdi->wb.list_lock);
if (bdi_cap_writeback_dirty(bdi)) {
WARN(!test_bit(BDI_registered, &bdi->state),
"bdi-%s not registered\n", bdi->name);
@@ -1187,8 +1187,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
wakeup_bdi = true;
}
- spin_unlock(&inode->i_lock);
- spin_lock(&bdi->wb.list_lock);
inode->dirtied_when = jiffies;
list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
spin_unlock(&bdi->wb.list_lock);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 8702b732109..73899c1c344 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -913,7 +913,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
(1 << FSCACHE_OP_WAITING) |
(1 << FSCACHE_OP_UNUSE_COOKIE);
- ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+ ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
if (ret < 0)
goto nomem_free;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e0fe703ee3d..84434594e80 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -930,7 +930,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	/* fuse does its own writeback accounting */
- fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
+ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
err = bdi_init(&fc->bdi);
if (err)
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index a63371815aa..24bc20fd42f 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -11,3 +11,21 @@ config HFSPLUS_FS
MacOS 8. It includes all Mac specific filesystem data such as
data forks and creator codes, but it also has several UNIX
style features such as file ownership and permissions.
+
+config HFSPLUS_FS_POSIX_ACL
+ bool "HFS+ POSIX Access Control Lists"
+ depends on HFSPLUS_FS
+ select FS_POSIX_ACL
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+	  Note that POSIX ACLs are interpreted only by Linux; they have
+	  no meaning under Mac OS X. Beginning with version 10.4
+	  ("Tiger"), Mac OS X supports NFSv4 ACLs, which are part of
+	  the NFSv4 standard.
+
+	  If you don't know what Access Control Lists are, say N.
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index 09d278bb7b9..683fca2e5e6 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
+
+hfsplus-$(CONFIG_HFSPLUS_FS_POSIX_ACL) += posix_acl.o
diff --git a/fs/hfsplus/acl.h b/fs/hfsplus/acl.h
new file mode 100644
index 00000000000..07c0d494752
--- /dev/null
+++ b/fs/hfsplus/acl.h
@@ -0,0 +1,30 @@
+/*
+ * linux/fs/hfsplus/acl.h
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for POSIX Access Control Lists (ACLs) support.
+ */
+
+#include <linux/posix_acl_xattr.h>
+
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+
+/* posix_acl.c */
+struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type);
+extern int hfsplus_posix_acl_chmod(struct inode *);
+extern int hfsplus_init_posix_acl(struct inode *, struct inode *);
+
+#else /* CONFIG_HFSPLUS_FS_POSIX_ACL */
+#define hfsplus_get_posix_acl NULL
+
+static inline int hfsplus_posix_acl_chmod(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
+{
+ return 0;
+}
+#endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index d8ce4bd17fc..4a4fea00267 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -16,6 +16,7 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
+#include "acl.h"
static inline void hfsplus_instantiate(struct dentry *dentry,
struct inode *inode, u32 cnid)
@@ -529,6 +530,9 @@ const struct inode_operations hfsplus_dir_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = hfsplus_listxattr,
.removexattr = hfsplus_removexattr,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+ .get_acl = hfsplus_get_posix_acl,
+#endif
};
const struct file_operations hfsplus_dir_operations = {
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index ede79317cfb..2b9cd01696e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -30,6 +30,7 @@
#define DBG_EXTENT 0x00000020
#define DBG_BITMAP 0x00000040
#define DBG_ATTR_MOD 0x00000080
+#define DBG_ACL_MOD 0x00000100
#if 0
#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f833d35630a..4d2edaea891 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -19,6 +19,7 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
+#include "acl.h"
static int hfsplus_readpage(struct file *file, struct page *page)
{
@@ -316,6 +317,13 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
setattr_copy(inode, attr);
mark_inode_dirty(inode);
+
+ if (attr->ia_valid & ATTR_MODE) {
+ error = hfsplus_posix_acl_chmod(inode);
+ if (unlikely(error))
+ return error;
+ }
+
return 0;
}
@@ -383,6 +391,9 @@ static const struct inode_operations hfsplus_file_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = hfsplus_listxattr,
.removexattr = hfsplus_removexattr,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+ .get_acl = hfsplus_get_posix_acl,
+#endif
};
static const struct file_operations hfsplus_file_operations = {
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
new file mode 100644
index 00000000000..b609cc14c72
--- /dev/null
+++ b/fs/hfsplus/posix_acl.c
@@ -0,0 +1,274 @@
+/*
+ * linux/fs/hfsplus/posix_acl.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for POSIX Access Control Lists (ACLs) support.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+#include "acl.h"
+
+struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ char *xattr_name;
+ char *value = NULL;
+ ssize_t size;
+
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ xattr_name = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ xattr_name = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = __hfsplus_getxattr(inode, xattr_name, NULL, 0);
+
+ if (size > 0) {
+ value = (char *)hfsplus_alloc_attr_entry();
+ if (unlikely(!value))
+ return ERR_PTR(-ENOMEM);
+ size = __hfsplus_getxattr(inode, xattr_name, value, size);
+ }
+
+ if (size > 0)
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ else if (size == -ENODATA)
+ acl = NULL;
+ else
+ acl = ERR_PTR(size);
+
+ hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
+
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
+
+ return acl;
+}
+
+static int hfsplus_set_posix_acl(struct inode *inode,
+ int type,
+ struct posix_acl *acl)
+{
+ int err;
+ char *xattr_name;
+ size_t size = 0;
+ char *value = NULL;
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ xattr_name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+ err = posix_acl_equiv_mode(acl, &inode->i_mode);
+ if (err < 0)
+ return err;
+ }
+ err = 0;
+ break;
+
+ case ACL_TYPE_DEFAULT:
+ xattr_name = POSIX_ACL_XATTR_DEFAULT;
+ if (!S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (acl) {
+ size = posix_acl_xattr_size(acl->a_count);
+ if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE))
+ return -ENOMEM;
+ value = (char *)hfsplus_alloc_attr_entry();
+ if (unlikely(!value))
+ return -ENOMEM;
+ err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+ if (unlikely(err < 0))
+ goto end_set_acl;
+ }
+
+ err = __hfsplus_setxattr(inode, xattr_name, value, size, 0);
+
+end_set_acl:
+ hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
+
+ if (!err)
+ set_cached_acl(inode, type, acl);
+
+ return err;
+}
+
+int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
+{
+ int err = 0;
+ struct posix_acl *acl = NULL;
+
+ hfs_dbg(ACL_MOD,
+ "[%s]: ino %lu, dir->ino %lu\n",
+ __func__, inode->i_ino, dir->i_ino);
+
+ if (S_ISLNK(inode->i_mode))
+ return 0;
+
+ acl = hfsplus_get_posix_acl(dir, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
+ if (acl) {
+ if (S_ISDIR(inode->i_mode)) {
+ err = hfsplus_set_posix_acl(inode,
+ ACL_TYPE_DEFAULT,
+ acl);
+ if (unlikely(err))
+ goto init_acl_cleanup;
+ }
+
+ err = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
+ if (unlikely(err < 0))
+ return err;
+
+ if (err > 0)
+ err = hfsplus_set_posix_acl(inode,
+ ACL_TYPE_ACCESS,
+ acl);
+ } else
+ inode->i_mode &= ~current_umask();
+
+init_acl_cleanup:
+ posix_acl_release(acl);
+ return err;
+}
+
+int hfsplus_posix_acl_chmod(struct inode *inode)
+{
+ int err;
+ struct posix_acl *acl;
+
+ hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino);
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ acl = hfsplus_get_posix_acl(inode, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl) || !acl)
+ return PTR_ERR(acl);
+
+ err = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+ if (unlikely(err))
+ return err;
+
+ err = hfsplus_set_posix_acl(inode, ACL_TYPE_ACCESS, acl);
+ posix_acl_release(acl);
+ return err;
+}
+
+static int hfsplus_xattr_get_posix_acl(struct dentry *dentry,
+ const char *name,
+ void *buffer,
+ size_t size,
+ int type)
+{
+ int err = 0;
+ struct posix_acl *acl;
+
+ hfs_dbg(ACL_MOD,
+ "[%s]: ino %lu, buffer %p, size %zu, type %#x\n",
+ __func__, dentry->d_inode->i_ino, buffer, size, type);
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ acl = hfsplus_get_posix_acl(dentry->d_inode, type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl == NULL)
+ return -ENODATA;
+
+ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ posix_acl_release(acl);
+
+ return err;
+}
+
+static int hfsplus_xattr_set_posix_acl(struct dentry *dentry,
+ const char *name,
+ const void *value,
+ size_t size,
+ int flags,
+ int type)
+{
+ int err = 0;
+ struct inode *inode = dentry->d_inode;
+ struct posix_acl *acl = NULL;
+
+ hfs_dbg(ACL_MOD,
+ "[%s]: ino %lu, value %p, size %zu, flags %#x, type %#x\n",
+ __func__, inode->i_ino, value, size, flags, type);
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+
+ if (value) {
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ else if (acl) {
+ err = posix_acl_valid(acl);
+ if (err)
+ goto end_xattr_set_acl;
+ }
+ }
+
+ err = hfsplus_set_posix_acl(inode, type, acl);
+
+end_xattr_set_acl:
+ posix_acl_release(acl);
+ return err;
+}
+
+static size_t hfsplus_xattr_list_posix_acl(struct dentry *dentry,
+ char *list,
+ size_t list_size,
+ const char *name,
+ size_t name_len,
+ int type)
+{
+ /*
+	 * This method is not used.
+	 * hfsplus_listxattr() is used instead of generic_listxattr().
+ */
+ return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .list = hfsplus_xattr_list_posix_acl,
+ .get = hfsplus_xattr_get_posix_acl,
+ .set = hfsplus_xattr_set_posix_acl,
+};
+
+const struct xattr_handler hfsplus_xattr_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .list = hfsplus_xattr_list_posix_acl,
+ .get = hfsplus_xattr_get_posix_acl,
+ .set = hfsplus_xattr_set_posix_acl,
+};
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index f66346155df..bd8471fb9a6 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -8,11 +8,16 @@
#include "hfsplus_fs.h"
#include "xattr.h"
+#include "acl.h"
const struct xattr_handler *hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+ &hfsplus_xattr_acl_access_handler,
+ &hfsplus_xattr_acl_default_handler,
+#endif
&hfsplus_xattr_security_handler,
NULL
};
@@ -46,11 +51,58 @@ static inline int is_known_namespace(const char *name)
return true;
}
+static int can_set_system_xattr(struct inode *inode, const char *name,
+ const void *value, size_t size)
+{
+#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
+ struct posix_acl *acl;
+ int err;
+
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+
+ /*
+ * POSIX_ACL_XATTR_ACCESS is tied to i_mode
+ */
+ if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ err = posix_acl_equiv_mode(acl, &inode->i_mode);
+ posix_acl_release(acl);
+ if (err < 0)
+ return err;
+ mark_inode_dirty(inode);
+ }
+ /*
+ * We're changing the ACL. Get rid of the cached one
+ */
+ forget_cached_acl(inode, ACL_TYPE_ACCESS);
+
+ return 0;
+ } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ posix_acl_release(acl);
+
+ /*
+ * We're changing the default ACL. Get rid of the cached one
+ */
+ forget_cached_acl(inode, ACL_TYPE_DEFAULT);
+
+ return 0;
+ }
+#endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */
+ return -EOPNOTSUPP;
+}
+
static int can_set_xattr(struct inode *inode, const char *name,
const void *value, size_t value_len)
{
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return -EOPNOTSUPP; /* TODO: implement ACL support */
+ return can_set_system_xattr(inode, name, value, value_len);
if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) {
/*
@@ -253,11 +305,10 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len)
return len;
}
-static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
+static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
void *value, size_t size)
{
ssize_t res = 0;
- struct inode *inode = dentry->d_inode;
struct hfs_find_data fd;
u16 entry_type;
u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
@@ -304,10 +355,9 @@ end_getxattr_finder_info:
return res;
}
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
void *value, size_t size)
{
- struct inode *inode = dentry->d_inode;
struct hfs_find_data fd;
hfsplus_attr_entry *entry;
__be32 xattr_record_type;
@@ -333,7 +383,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
}
if (!strcmp_xattr_finder_info(name))
- return hfsplus_getxattr_finder_info(dentry, value, size);
+ return hfsplus_getxattr_finder_info(inode, value, size);
if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
return -EOPNOTSUPP;
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index 847b695b984..841b5698c0f 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -14,8 +14,8 @@
extern const struct xattr_handler hfsplus_xattr_osx_handler;
extern const struct xattr_handler hfsplus_xattr_user_handler;
extern const struct xattr_handler hfsplus_xattr_trusted_handler;
-/*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/
-/*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/
+extern const struct xattr_handler hfsplus_xattr_acl_access_handler;
+extern const struct xattr_handler hfsplus_xattr_acl_default_handler;
extern const struct xattr_handler hfsplus_xattr_security_handler;
extern const struct xattr_handler *hfsplus_xattr_handlers[];
@@ -29,9 +29,17 @@ static inline int hfsplus_setxattr(struct dentry *dentry, const char *name,
return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
}
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
void *value, size_t size);
+static inline ssize_t hfsplus_getxattr(struct dentry *dentry,
+ const char *name,
+ void *value,
+ size_t size)
+{
+ return __hfsplus_getxattr(dentry->d_inode, name, value, size);
+}
+
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
int hfsplus_removexattr(struct dentry *dentry, const char *name);
@@ -39,22 +47,7 @@ int hfsplus_removexattr(struct dentry *dentry, const char *name);
int hfsplus_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr);
-static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir)
-{
- /*TODO: implement*/
- return 0;
-}
-
-static inline int hfsplus_init_inode_security(struct inode *inode,
- struct inode *dir,
- const struct qstr *qstr)
-{
- int err;
-
- err = hfsplus_init_acl(inode, dir);
- if (!err)
- err = hfsplus_init_security(inode, dir, qstr);
- return err;
-}
+int hfsplus_init_inode_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
#endif
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index 83b842f113c..00722765ea7 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -9,6 +9,7 @@
#include <linux/security.h>
#include "hfsplus_fs.h"
#include "xattr.h"
+#include "acl.h"
static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
@@ -96,6 +97,18 @@ int hfsplus_init_security(struct inode *inode, struct inode *dir,
&hfsplus_initxattrs, NULL);
}
+int hfsplus_init_inode_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr)
+{
+ int err;
+
+ err = hfsplus_init_posix_acl(inode, dir);
+ if (!err)
+ err = hfsplus_init_security(inode, dir, qstr);
+ return err;
+}
+
const struct xattr_handler hfsplus_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = hfsplus_security_listxattr,
diff --git a/fs/internal.h b/fs/internal.h
index d2089379552..2be46ea5dd0 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -45,6 +45,9 @@ extern void __init chrdev_init(void);
* namei.c
*/
extern int __inode_permission(struct inode *, int);
+extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+ const char *, unsigned int, struct path *);
/*
* namespace.c
diff --git a/fs/namei.c b/fs/namei.c
index 56e4f4d537d..409a441ba2a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -574,9 +574,12 @@ unlock_and_drop_dentry:
drop_dentry:
unlock_rcu_walk();
dput(dentry);
- return -ECHILD;
+ goto drop_root_mnt;
out:
unlock_rcu_walk();
+drop_root_mnt:
+ if (!(nd->flags & LOOKUP_ROOT))
+ nd->root.mnt = NULL;
return -ECHILD;
}
@@ -2206,7 +2209,7 @@ user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
}
/**
- * umount_lookup_last - look up last component for umount
+ * mountpoint_last - look up last component for umount
* @nd: pathwalk nameidata - currently pointing at parent directory of "last"
* @path: pointer to container for result
*
@@ -2233,25 +2236,28 @@ user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
* to the link, and nd->path will *not* be put.
*/
static int
-umount_lookup_last(struct nameidata *nd, struct path *path)
+mountpoint_last(struct nameidata *nd, struct path *path)
{
int error = 0;
struct dentry *dentry;
struct dentry *dir = nd->path.dentry;
- if (unlikely(nd->flags & LOOKUP_RCU)) {
- WARN_ON_ONCE(1);
- error = -ECHILD;
- goto error_check;
+ /* If we're in rcuwalk, drop out of it to handle last component */
+ if (nd->flags & LOOKUP_RCU) {
+ if (unlazy_walk(nd, NULL)) {
+ error = -ECHILD;
+ goto out;
+ }
}
nd->flags &= ~LOOKUP_PARENT;
if (unlikely(nd->last_type != LAST_NORM)) {
error = handle_dots(nd, nd->last_type);
- if (!error)
- dentry = dget(nd->path.dentry);
- goto error_check;
+ if (error)
+ goto out;
+ dentry = dget(nd->path.dentry);
+ goto done;
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -2265,44 +2271,43 @@ umount_lookup_last(struct nameidata *nd, struct path *path)
dentry = d_alloc(dir, &nd->last);
if (!dentry) {
error = -ENOMEM;
- } else {
- dentry = lookup_real(dir->d_inode, dentry, nd->flags);
- if (IS_ERR(dentry))
- error = PTR_ERR(dentry);
+ goto out;
}
+ dentry = lookup_real(dir->d_inode, dentry, nd->flags);
+ error = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ goto out;
}
mutex_unlock(&dir->d_inode->i_mutex);
-error_check:
- if (!error) {
- if (!dentry->d_inode) {
- error = -ENOENT;
- dput(dentry);
- } else {
- path->dentry = dentry;
- path->mnt = mntget(nd->path.mnt);
- if (should_follow_link(dentry->d_inode,
- nd->flags & LOOKUP_FOLLOW))
- return 1;
- follow_mount(path);
- }
+done:
+ if (!dentry->d_inode) {
+ error = -ENOENT;
+ dput(dentry);
+ goto out;
}
+ path->dentry = dentry;
+ path->mnt = mntget(nd->path.mnt);
+ if (should_follow_link(dentry->d_inode, nd->flags & LOOKUP_FOLLOW))
+ return 1;
+ follow_mount(path);
+ error = 0;
+out:
terminate_walk(nd);
return error;
}
/**
- * path_umountat - look up a path to be umounted
+ * path_mountpoint - look up a path to be umounted
* @dfd: directory file descriptor to start walk from
* @name: full pathname to walk
* @flags: lookup flags
- * @nd: pathwalk nameidata
*
* Look up the given name, but don't attempt to revalidate the last component.
 * Returns 0 and "path" will be valid on success; returns error otherwise.
*/
static int
-path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
+path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
{
struct file *base = NULL;
struct nameidata nd;
@@ -2317,16 +2322,7 @@ path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
if (err)
goto out;
- /* If we're in rcuwalk, drop out of it to handle last component */
- if (nd.flags & LOOKUP_RCU) {
- err = unlazy_walk(&nd, NULL);
- if (err) {
- terminate_walk(&nd);
- goto out;
- }
- }
-
- err = umount_lookup_last(&nd, path);
+ err = mountpoint_last(&nd, path);
while (err > 0) {
void *cookie;
struct path link = *path;
@@ -2337,7 +2333,7 @@ path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
err = follow_link(&link, &nd, &cookie);
if (err)
break;
- err = umount_lookup_last(&nd, path);
+ err = mountpoint_last(&nd, path);
put_link(&nd, &link, cookie);
}
out:
@@ -2350,8 +2346,22 @@ out:
return err;
}
+static int
+filename_mountpoint(int dfd, struct filename *s, struct path *path,
+ unsigned int flags)
+{
+ int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+ if (unlikely(error == -ECHILD))
+ error = path_mountpoint(dfd, s->name, path, flags);
+ if (unlikely(error == -ESTALE))
+ error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+ if (likely(!error))
+ audit_inode(s, path->dentry, 0);
+ return error;
+}
+
/**
- * user_path_umountat - lookup a path from userland in order to umount it
+ * user_path_mountpoint_at - lookup a path from userland in order to umount it
* @dfd: directory file descriptor
* @name: pathname from userland
* @flags: lookup flags
@@ -2365,28 +2375,27 @@ out:
* Returns 0 and populates "path" on success.
*/
int
-user_path_umountat(int dfd, const char __user *name, unsigned int flags,
+user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
struct path *path)
{
struct filename *s = getname(name);
int error;
-
if (IS_ERR(s))
return PTR_ERR(s);
-
- error = path_umountat(dfd, s->name, path, flags | LOOKUP_RCU);
- if (unlikely(error == -ECHILD))
- error = path_umountat(dfd, s->name, path, flags);
- if (unlikely(error == -ESTALE))
- error = path_umountat(dfd, s->name, path, flags | LOOKUP_REVAL);
-
- if (likely(!error))
- audit_inode(s, path->dentry, 0);
-
+ error = filename_mountpoint(dfd, s, path, flags);
putname(s);
return error;
}
+int
+kern_path_mountpoint(int dfd, const char *name, struct path *path,
+ unsigned int flags)
+{
+ struct filename s = {.name = name};
+ return filename_mountpoint(dfd, &s, path, flags);
+}
+EXPORT_SYMBOL(kern_path_mountpoint);
+
/*
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
diff --git a/fs/namespace.c b/fs/namespace.c
index fc2b5226278..da5c4948343 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -17,7 +17,7 @@
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h> /* acct_auto_close_mnt */
-#include <linux/ramfs.h> /* init_rootfs */
+#include <linux/init.h> /* init_rootfs */
 #include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
@@ -1321,7 +1321,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- retval = user_path_umountat(AT_FDCWD, name, lookup_flags, &path);
+ retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
mnt = real_mount(path.mnt);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 105a3b080d1..e0a65a9e37e 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -173,8 +173,6 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
int status;
struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
-
if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
if (!nn->rec_file)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 43f42290e5d..0874998a49c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -368,11 +368,8 @@ static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
{
struct nfs4_delegation *dp;
- struct nfs4_file *fp = stp->st_file;
dprintk("NFSD alloc_init_deleg\n");
- if (fp->fi_had_conflict)
- return NULL;
if (num_delegations > max_delegations)
return NULL;
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
@@ -389,8 +386,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
- get_nfs4_file(fp);
- dp->dl_file = fp;
+ dp->dl_file = NULL;
dp->dl_type = NFS4_OPEN_DELEGATE_READ;
fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
dp->dl_time = 0;
@@ -3035,7 +3031,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
if (status) {
list_del_init(&dp->dl_perclnt);
locks_free_lock(fl);
- return -ENOMEM;
+ return status;
}
fp->fi_lease = fl;
fp->fi_deleg_file = get_file(fl->fl_file);
@@ -3044,22 +3040,35 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
return 0;
}
-static int nfs4_set_delegation(struct nfs4_delegation *dp)
+static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
- struct nfs4_file *fp = dp->dl_file;
+ int status;
- if (!fp->fi_lease)
- return nfs4_setlease(dp);
+ if (fp->fi_had_conflict)
+ return -EAGAIN;
+ get_nfs4_file(fp);
+ dp->dl_file = fp;
+ if (!fp->fi_lease) {
+ status = nfs4_setlease(dp);
+ if (status)
+ goto out_free;
+ return 0;
+ }
spin_lock(&recall_lock);
if (fp->fi_had_conflict) {
spin_unlock(&recall_lock);
- return -EAGAIN;
+ status = -EAGAIN;
+ goto out_free;
}
atomic_inc(&fp->fi_delegees);
list_add(&dp->dl_perfile, &fp->fi_delegations);
spin_unlock(&recall_lock);
list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
return 0;
+out_free:
+ put_nfs4_file(fp);
+ dp->dl_file = fp;
+ return status;
}
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
@@ -3134,7 +3143,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
if (dp == NULL)
goto out_no_deleg;
- status = nfs4_set_delegation(dp);
+ status = nfs4_set_delegation(dp, stp->st_file);
if (status)
goto out_free;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 8a404576fb2..b4f788e0ca3 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -51,10 +51,6 @@ static struct posix_acl *ocfs2_acl_from_xattr(const void *value, size_t size)
return ERR_PTR(-EINVAL);
count = size / sizeof(struct posix_acl_entry);
- if (count < 0)
- return ERR_PTR(-EINVAL);
- if (count == 0)
- return NULL;
acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 94417a85ce6..f37d3c0e205 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2044,7 +2044,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
out_write_size:
pos += copied;
- if (pos > inode->i_size) {
+ if (pos > i_size_read(inode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 5c1c864e81c..363f0dcc924 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -628,11 +628,9 @@ static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
struct o2nm_node *node,
int idx)
{
- struct list_head *iter;
struct o2hb_callback_func *f;
- list_for_each(iter, &hbcall->list) {
- f = list_entry(iter, struct o2hb_callback_func, hc_item);
+ list_for_each_entry(f, &hbcall->list, hc_item) {
mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
(f->hc_func)(node, idx, f->hc_data);
}
@@ -641,16 +639,9 @@ static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
- int empty;
struct o2hb_callback *hbcall;
struct o2hb_node_event *event;
- spin_lock(&o2hb_live_lock);
- empty = list_empty(&queued_event->hn_item);
- spin_unlock(&o2hb_live_lock);
- if (empty)
- return;
-
/* Holding callback sem assures we don't alter the callback
* lists when doing this, and serializes ourselves with other
* processes wanting callbacks. */
@@ -709,6 +700,7 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
struct o2hb_node_event event =
{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
struct o2nm_node *node;
+ int queued = 0;
node = o2nm_get_node_by_num(slot->ds_node_num);
if (!node)
@@ -726,11 +718,13 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
slot->ds_node_num);
+ queued = 1;
}
}
spin_unlock(&o2hb_live_lock);
- o2hb_run_event_list(&event);
+ if (queued)
+ o2hb_run_event_list(&event);
o2nm_node_put(node);
}
@@ -790,6 +784,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
unsigned int slot_dead_ms;
int tmp;
+ int queued = 0;
memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
@@ -883,6 +878,7 @@ fire_callbacks:
slot->ds_node_num);
changed = 1;
+ queued = 1;
}
list_add_tail(&slot->ds_live_item,
@@ -934,6 +930,7 @@ fire_callbacks:
node, slot->ds_node_num);
changed = 1;
+ queued = 1;
}
/* We don't clear this because the node is still
@@ -949,7 +946,8 @@ fire_callbacks:
out:
spin_unlock(&o2hb_live_lock);
- o2hb_run_event_list(&event);
+ if (queued)
+ o2hb_run_event_list(&event);
if (node)
o2nm_node_put(node);
@@ -2516,8 +2514,7 @@ unlock:
int o2hb_register_callback(const char *region_uuid,
struct o2hb_callback_func *hc)
{
- struct o2hb_callback_func *tmp;
- struct list_head *iter;
+ struct o2hb_callback_func *f;
struct o2hb_callback *hbcall;
int ret;
@@ -2540,10 +2537,9 @@ int o2hb_register_callback(const char *region_uuid,
down_write(&o2hb_callback_sem);
- list_for_each(iter, &hbcall->list) {
- tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
- if (hc->hc_priority < tmp->hc_priority) {
- list_add_tail(&hc->hc_item, iter);
+ list_for_each_entry(f, &hbcall->list, hc_item) {
+ if (hc->hc_priority < f->hc_priority) {
+ list_add_tail(&hc->hc_item, &f->hc_item);
break;
}
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index d644dc61142..2cd2406b414 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -543,8 +543,9 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_NOTICE "o2net: No longer connected to "
- SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
+ if (old_sc)
+ printk(KERN_NOTICE "o2net: No longer connected to "
+ SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -765,32 +766,32 @@ static struct o2net_msg_handler *
o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
struct rb_node **ret_parent)
{
- struct rb_node **p = &o2net_handler_tree.rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node **p = &o2net_handler_tree.rb_node;
+ struct rb_node *parent = NULL;
struct o2net_msg_handler *nmh, *ret = NULL;
int cmp;
- while (*p) {
- parent = *p;
- nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
+ while (*p) {
+ parent = *p;
+ nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
cmp = o2net_handler_cmp(nmh, msg_type, key);
- if (cmp < 0)
- p = &(*p)->rb_left;
- else if (cmp > 0)
- p = &(*p)->rb_right;
- else {
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
ret = nmh;
- break;
+ break;
}
- }
+ }
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
- return ret;
+ return ret;
}
static void o2net_handler_kref_release(struct kref *kref)
@@ -1695,13 +1696,12 @@ static void o2net_start_connect(struct work_struct *work)
ret = 0;
out:
- if (ret) {
+ if (ret && sc) {
printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
" failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
- if (sc)
- o2net_ensure_shutdown(nn, sc, 0);
+ o2net_ensure_shutdown(nn, sc, 0);
}
if (sc)
sc_put(sc);
@@ -1873,12 +1873,16 @@ static int o2net_accept_one(struct socket *sock)
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
- printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
- "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
- "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port), node->nd_name,
- node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ if (local_node)
+ printk(KERN_NOTICE "o2net: Unexpected connect attempt "
+ "seen at node '%s' (%u, %pI4:%d) from "
+ "node '%s' (%u, %pI4:%d)\n",
+ local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port),
+ node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index fbec0be6232..b46278f9ae4 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -292,7 +292,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
struct dlm_lock *lock = NULL;
struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
char *name;
- struct list_head *iter, *head=NULL;
+ struct list_head *head = NULL;
__be64 cookie;
u32 flags;
u8 node;
@@ -373,8 +373,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
/* try convert queue for both ast/bast */
head = &res->converting;
lock = NULL;
- list_for_each(iter, head) {
- lock = list_entry (iter, struct dlm_lock, list);
+ list_for_each_entry(lock, head, list) {
if (lock->ml.cookie == cookie)
goto do_ast;
}
@@ -385,8 +384,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
else
head = &res->granted;
- list_for_each(iter, head) {
- lock = list_entry (iter, struct dlm_lock, list);
+ list_for_each_entry(lock, head, list) {
if (lock->ml.cookie == cookie)
goto do_ast;
}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index de854cca12a..e0517762fcc 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1079,11 +1079,9 @@ static inline int dlm_lock_compatible(int existing, int request)
static inline int dlm_lock_on_list(struct list_head *head,
struct dlm_lock *lock)
{
- struct list_head *iter;
struct dlm_lock *tmplock;
- list_for_each(iter, head) {
- tmplock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(tmplock, head, list) {
if (tmplock == lock)
return 1;
}
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 29a886d1e82..e36d63ff178 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -123,7 +123,6 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
int *kick_thread)
{
enum dlm_status status = DLM_NORMAL;
- struct list_head *iter;
struct dlm_lock *tmplock=NULL;
assert_spin_locked(&res->spinlock);
@@ -185,16 +184,14 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
/* upconvert from here on */
status = DLM_NORMAL;
- list_for_each(iter, &res->granted) {
- tmplock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(tmplock, &res->granted, list) {
if (tmplock == lock)
continue;
if (!dlm_lock_compatible(tmplock->ml.type, type))
goto switch_queues;
}
- list_for_each(iter, &res->converting) {
- tmplock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(tmplock, &res->converting, list) {
if (!dlm_lock_compatible(tmplock->ml.type, type))
goto switch_queues;
/* existing conversion requests take precedence */
@@ -424,8 +421,8 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
struct dlm_ctxt *dlm = data;
struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
- struct list_head *iter;
struct dlm_lock *lock = NULL;
+ struct dlm_lock *tmp_lock;
struct dlm_lockstatus *lksb;
enum dlm_status status = DLM_NORMAL;
u32 flags;
@@ -471,14 +468,13 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
dlm_error(status);
goto leave;
}
- list_for_each(iter, &res->granted) {
- lock = list_entry(iter, struct dlm_lock, list);
- if (lock->ml.cookie == cnv->cookie &&
- lock->ml.node == cnv->node_idx) {
+ list_for_each_entry(tmp_lock, &res->granted, list) {
+ if (tmp_lock->ml.cookie == cnv->cookie &&
+ tmp_lock->ml.node == cnv->node_idx) {
+ lock = tmp_lock;
dlm_lock_get(lock);
break;
}
- lock = NULL;
}
spin_unlock(&res->spinlock);
if (!lock) {
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 0e28e242226..e33cd7a3c58 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -96,7 +96,6 @@ static void __dlm_print_lock(struct dlm_lock *lock)
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
{
- struct list_head *iter2;
struct dlm_lock *lock;
char buf[DLM_LOCKID_NAME_MAX];
@@ -118,18 +117,15 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
res->inflight_locks, atomic_read(&res->asts_reserved));
dlm_print_lockres_refmap(res);
printk(" granted queue:\n");
- list_for_each(iter2, &res->granted) {
- lock = list_entry(iter2, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->granted, list) {
__dlm_print_lock(lock);
}
printk(" converting queue:\n");
- list_for_each(iter2, &res->converting) {
- lock = list_entry(iter2, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->converting, list) {
__dlm_print_lock(lock);
}
printk(" blocked queue:\n");
- list_for_each(iter2, &res->blocked) {
- lock = list_entry(iter2, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->blocked, list) {
__dlm_print_lock(lock);
}
}
@@ -446,7 +442,6 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
{
struct dlm_master_list_entry *mle;
struct hlist_head *bucket;
- struct hlist_node *list;
int i, out = 0;
unsigned long total = 0, longest = 0, bucket_count = 0;
@@ -456,9 +451,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
spin_lock(&dlm->master_lock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
- hlist_for_each(list, bucket) {
- mle = hlist_entry(list, struct dlm_master_list_entry,
- master_hash_node);
+ hlist_for_each_entry(mle, bucket, master_hash_node) {
++total;
++bucket_count;
if (len - out < 200)
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index dbb17c07656..8b3382abf84 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -193,7 +193,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
unsigned int hash)
{
struct hlist_head *bucket;
- struct hlist_node *list;
+ struct dlm_lock_resource *res;
mlog(0, "%.*s\n", len, name);
@@ -201,9 +201,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
bucket = dlm_lockres_hash(dlm, hash);
- hlist_for_each(list, bucket) {
- struct dlm_lock_resource *res = hlist_entry(list,
- struct dlm_lock_resource, hash_node);
+ hlist_for_each_entry(res, bucket, hash_node) {
if (res->lockname.name[0] != name[0])
continue;
if (unlikely(res->lockname.len != len))
@@ -262,22 +260,19 @@ struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
{
- struct dlm_ctxt *tmp = NULL;
- struct list_head *iter;
+ struct dlm_ctxt *tmp;
assert_spin_locked(&dlm_domain_lock);
/* tmp->name here is always NULL terminated,
* but domain may not be! */
- list_for_each(iter, &dlm_domains) {
- tmp = list_entry (iter, struct dlm_ctxt, list);
+ list_for_each_entry(tmp, &dlm_domains, list) {
if (strlen(tmp->name) == len &&
memcmp(tmp->name, domain, len)==0)
- break;
- tmp = NULL;
+ return tmp;
}
- return tmp;
+ return NULL;
}
/* For null terminated domain strings ONLY */
@@ -366,25 +361,22 @@ static void __dlm_get(struct dlm_ctxt *dlm)
* you shouldn't trust your pointer. */
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
{
- struct list_head *iter;
- struct dlm_ctxt *target = NULL;
+ struct dlm_ctxt *target;
+ struct dlm_ctxt *ret = NULL;
spin_lock(&dlm_domain_lock);
- list_for_each(iter, &dlm_domains) {
- target = list_entry (iter, struct dlm_ctxt, list);
-
+ list_for_each_entry(target, &dlm_domains, list) {
if (target == dlm) {
__dlm_get(target);
+ ret = target;
break;
}
-
- target = NULL;
}
spin_unlock(&dlm_domain_lock);
- return target;
+ return ret;
}
int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
@@ -2296,13 +2288,10 @@ static DECLARE_RWSEM(dlm_callback_sem);
void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
int node_num)
{
- struct list_head *iter;
struct dlm_eviction_cb *cb;
down_read(&dlm_callback_sem);
- list_for_each(iter, &dlm->dlm_eviction_callbacks) {
- cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
-
+ list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
cb->ec_func(node_num, cb->ec_data);
}
up_read(&dlm_callback_sem);
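
One subtlety in the __dlm_lookup_domain_full() and dlm_grab() conversions above: with the old list_for_each() the code reset the cursor to NULL on every miss and tested it after the loop. With list_for_each_entry() the cursor is never NULL when the loop falls off the end -- it aliases a bogus entry computed from the list head -- so the patch returns the match from inside the loop (or copies it into a separate ret variable). A hedged sketch of the safe shape, reusing the same simplified macros as above:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct list_head { struct list_head *next, *prev; };
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each_entry(pos, head, member)                          \
            for (pos = container_of((head)->next, typeof(*pos), member);    \
                 &pos->member != (head);                                    \
                 pos = container_of(pos->member.next, typeof(*pos), member))

    struct domain { const char *name; struct list_head list; };

    static struct domain *lookup(struct list_head *head, const char *name)
    {
            struct domain *d;

            list_for_each_entry(d, head, list)
                    if (strcmp(d->name, name) == 0)
                            return d;   /* return while 'd' is still valid */
            return NULL;                /* never test 'd' here: it is not NULL */
    }

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct domain a = { .name = "ocfs2" };

            head.next = head.prev = &a.list;
            a.list.next = a.list.prev = &head;
            printf("%s\n", lookup(&head, "ocfs2") ? "found" : "missing");
            return 0;
    }
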
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 47e67c2d228..5d32f7511f7 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -91,19 +91,14 @@ void dlm_destroy_lock_cache(void)
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
- struct list_head *iter;
struct dlm_lock *tmplock;
- list_for_each(iter, &res->granted) {
- tmplock = list_entry(iter, struct dlm_lock, list);
-
+ list_for_each_entry(tmplock, &res->granted, list) {
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
}
- list_for_each(iter, &res->converting) {
- tmplock = list_entry(iter, struct dlm_lock, list);
-
+ list_for_each_entry(tmplock, &res->converting, list) {
if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
return 0;
if (!dlm_lock_compatible(tmplock->ml.convert_type,
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 33ecbe0e673..cf0f103963b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -342,16 +342,13 @@ static int dlm_find_mle(struct dlm_ctxt *dlm,
{
struct dlm_master_list_entry *tmpmle;
struct hlist_head *bucket;
- struct hlist_node *list;
unsigned int hash;
assert_spin_locked(&dlm->master_lock);
hash = dlm_lockid_hash(name, namelen);
bucket = dlm_master_hash(dlm, hash);
- hlist_for_each(list, bucket) {
- tmpmle = hlist_entry(list, struct dlm_master_list_entry,
- master_hash_node);
+ hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
continue;
dlm_get_mle(tmpmle);
@@ -3183,7 +3180,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
struct dlm_master_list_entry *mle;
struct dlm_lock_resource *res;
struct hlist_head *bucket;
- struct hlist_node *list;
+ struct hlist_node *tmp;
unsigned int i;
mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
@@ -3194,10 +3191,7 @@ top:
spin_lock(&dlm->master_lock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
- hlist_for_each(list, bucket) {
- mle = hlist_entry(list, struct dlm_master_list_entry,
- master_hash_node);
-
+ hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
BUG_ON(mle->type != DLM_MLE_BLOCK &&
mle->type != DLM_MLE_MASTER &&
mle->type != DLM_MLE_MIGRATION);
@@ -3378,7 +3372,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm)
int i;
struct hlist_head *bucket;
struct dlm_master_list_entry *mle;
- struct hlist_node *tmp, *list;
+ struct hlist_node *tmp;
/*
* We notified all other nodes that we are exiting the domain and
@@ -3394,9 +3388,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm)
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
- hlist_for_each_safe(list, tmp, bucket) {
- mle = hlist_entry(list, struct dlm_master_list_entry,
- master_hash_node);
+ hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
if (mle->type != DLM_MLE_BLOCK) {
mlog(ML_ERROR, "bad mle: %p\n", mle);
dlm_print_one_mle(mle);
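
dlm_clean_master_list() can unlink entries while walking a hash bucket, so it takes the _safe variant: hlist_for_each_entry_safe() caches the next node in a spare cursor before the loop body runs, making it legal for the body to free the current entry. The same rule in miniature, on a plain singly-linked list (not the kernel's hlist):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    int main(void)
    {
            struct node *head = NULL, *n, *tmp;

            for (int i = 0; i < 4; i++) {
                    n = malloc(sizeof(*n));
                    n->val = i;
                    n->next = head;
                    head = n;
            }

            /* safe walk: grab ->next before the body invalidates 'n',
             * exactly what the _safe iterators do with their extra cursor */
            for (n = head; n; n = tmp) {
                    tmp = n->next;
                    printf("freeing %d\n", n->val);
                    free(n);
            }
            return 0;
    }
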
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 773bd32bfd8..0b5adca1b17 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -787,6 +787,7 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
{
struct dlm_lock_request lr;
int ret;
+ int status;
mlog(0, "\n");
@@ -800,13 +801,15 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
// send message
ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
- &lr, sizeof(lr), request_from, NULL);
+ &lr, sizeof(lr), request_from, &status);
/* negative status is handled by caller */
if (ret < 0)
mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
"to recover dead node %u\n", dlm->name, ret,
request_from, dead_node);
+ else
+ ret = status;
// return from here, then
// sleep until all received or error
return ret;
@@ -2328,6 +2331,14 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
+ } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ if (test_bit(dead_node, res->refmap)) {
+ mlog(0, "%s:%.*s: dead node %u had a ref, but had "
+ "no locks and had not purged before dying\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, dead_node);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+ }
}
spin_unlock(&res->spinlock);
}
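
The dlm_request_all_locks() hunk plumbs through the status word that o2net_send_message() can hand back from the remote node, where it was previously discarded: a negative ret means the transport failed, otherwise the peer's own status becomes the result. The two-level error pattern in isolation (send_message() below is a stand-in, not the real o2net API):

    #include <stdio.h>

    /* stand-in: returns <0 on transport error, else 0 and stores the
     * remote handler's status through *status */
    static int send_message(int *status)
    {
            *status = -11;  /* pretend the peer answered -EAGAIN */
            return 0;       /* the transport itself succeeded */
    }

    static int request_all_locks(void)
    {
            int ret, status;

            ret = send_message(&status);
            if (ret < 0)
                    fprintf(stderr, "transport error %d\n", ret);
            else
                    ret = status;   /* surface the remote verdict */
            return ret;
    }

    int main(void)
    {
            printf("request_all_locks() = %d\n", request_all_locks());
            return 0;
    }
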
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index e73c833fc2a..9db869de829 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -286,8 +286,6 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
struct dlm_lock *lock, *target;
- struct list_head *iter;
- struct list_head *head;
int can_grant = 1;
/*
@@ -314,9 +312,7 @@ converting:
dlm->name, res->lockname.len, res->lockname.name);
BUG();
}
- head = &res->granted;
- list_for_each(iter, head) {
- lock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->granted, list) {
if (lock==target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
@@ -333,9 +329,8 @@ converting:
target->ml.convert_type;
}
}
- head = &res->converting;
- list_for_each(iter, head) {
- lock = list_entry(iter, struct dlm_lock, list);
+
+ list_for_each_entry(lock, &res->converting, list) {
if (lock==target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
@@ -384,9 +379,7 @@ blocked:
goto leave;
target = list_entry(res->blocked.next, struct dlm_lock, list);
- head = &res->granted;
- list_for_each(iter, head) {
- lock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->granted, list) {
if (lock==target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
@@ -400,9 +393,7 @@ blocked:
}
}
- head = &res->converting;
- list_for_each(iter, head) {
- lock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(lock, &res->converting, list) {
if (lock==target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 850aa7e8753..5698b52cf5c 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -388,7 +388,6 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
struct dlm_ctxt *dlm = data;
struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
struct dlm_lock_resource *res = NULL;
- struct list_head *iter;
struct dlm_lock *lock = NULL;
enum dlm_status status = DLM_NORMAL;
int found = 0, i;
@@ -458,8 +457,7 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
}
for (i=0; i<3; i++) {
- list_for_each(iter, queue) {
- lock = list_entry(iter, struct dlm_lock, list);
+ list_for_each_entry(lock, queue, list) {
if (lock->ml.cookie == unlock->cookie &&
lock->ml.node == unlock->node_idx) {
dlm_lock_get(lock);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 12bafb7265c..efa2b3d339e 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -401,11 +401,8 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
umode_t mode = S_IFDIR | 0755;
- struct dlmfs_inode_private *ip;
if (inode) {
- ip = DLMFS_I(inode);
-
inode->i_ino = get_next_ino();
inode_init_owner(inode, NULL, mode);
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 2487116d0d3..767370b656c 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
cpos = map_start >> osb->s_clustersize_bits;
mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
map_start + map_len);
- mapping_end -= cpos;
is_last = 0;
while (cpos < mapping_end && !is_last) {
u32 fe_flags;
@@ -852,20 +851,20 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
down_read(&OCFS2_I(inode)->ip_alloc_sem);
- if (*offset >= inode->i_size) {
+ if (*offset >= i_size_read(inode)) {
ret = -ENXIO;
goto out_unlock;
}
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
if (whence == SEEK_HOLE)
- *offset = inode->i_size;
+ *offset = i_size_read(inode);
goto out_unlock;
}
clen = 0;
cpos = *offset >> cs_bits;
- cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+ cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
while (cpos < cend && !is_last) {
ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
@@ -904,8 +903,8 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
extlen = clen;
extlen <<= cs_bits;
- if ((extoff + extlen) > inode->i_size)
- extlen = inode->i_size - extoff;
+ if ((extoff + extlen) > i_size_read(inode))
+ extlen = i_size_read(inode) - extoff;
extoff += extlen;
if (extoff > *offset)
*offset = extoff;
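
Besides dropping the spurious mapping_end -= cpos adjustment so the fiemap loop compares absolute cluster offsets, the extent_map.c hunks convert bare inode->i_size loads to i_size_read(), a change repeated across this series. On 64-bit kernels i_size_read() is a plain load, but on 32-bit SMP or preempt builds i_size is a 64-bit field written in halves, so the read is guarded by a seqcount retry loop. A userspace sketch of that discipline with a hand-rolled seqcount (memory-ordering details elided; this is not the kernel's <linux/seqlock.h>):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdatomic.h>

    struct demo_inode {
            atomic_uint seq;            /* even = stable, odd = writer active */
            uint32_t size_lo, size_hi;  /* 64-bit i_size stored in halves */
    };

    static uint64_t i_size_read_demo(struct demo_inode *inode)
    {
            unsigned int s;
            uint64_t size;

            do {
                    while ((s = atomic_load(&inode->seq)) & 1)
                            ;       /* writer in progress: wait */
                    size = ((uint64_t)inode->size_hi << 32) | inode->size_lo;
            } while (atomic_load(&inode->seq) != s); /* raced: retry */
            return size;
    }

    static void i_size_write_demo(struct demo_inode *inode, uint64_t size)
    {
            atomic_fetch_add(&inode->seq, 1);       /* seq goes odd */
            inode->size_lo = (uint32_t)size;
            inode->size_hi = (uint32_t)(size >> 32);
            atomic_fetch_add(&inode->seq, 1);       /* back to even */
    }

    int main(void)
    {
            struct demo_inode inode = { .size_lo = 0 };

            i_size_write_demo(&inode, 5ULL << 30);  /* a 5 GiB size */
            printf("size = %llu\n",
                   (unsigned long long)i_size_read_demo(&inode));
            return 0;
    }
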
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3261d71319e..4f8197caa48 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -671,11 +671,7 @@ restarted_transaction:
} else {
BUG_ON(why != RESTART_TRANS);
- /* TODO: This can be more intelligent. */
- credits = ocfs2_calc_extend_credits(osb->sb,
- &fe->id2.i_list,
- clusters_to_add);
- status = ocfs2_extend_trans(handle, credits);
+ status = ocfs2_allocate_extend_trans(handle, 1);
if (status < 0) {
/* handle still has to be committed at
* this point. */
@@ -1800,6 +1796,7 @@ static int ocfs2_remove_inode_range(struct inode *inode,
ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
out:
+ ocfs2_free_path(path);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 0c60ef2d805..fa32ce9b455 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -303,7 +303,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
if (o2info_from_user(oij, req))
goto bail;
- oij.ij_journal_size = osb->journal->j_inode->i_size;
+ oij.ij_journal_size = i_size_read(osb->journal->j_inode);
o2info_set_request_filled(&oij.ij_req);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 242170d8397..44fc3e530c3 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -455,6 +455,41 @@ bail:
return status;
}
+/*
+ * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
+ * If that fails, restart the transaction & regain write access for the
+ * buffer head which is used for metadata modifications.
+ * Taken from Ext4: extend_or_restart_transaction()
+ */
+int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
+{
+ int status, old_nblks;
+
+ BUG_ON(!handle);
+
+ old_nblks = handle->h_buffer_credits;
+ trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
+
+ if (old_nblks < thresh)
+ return 0;
+
+ status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (status > 0) {
+ status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+bail:
+ return status;
+}
+
+
struct ocfs2_triggers {
struct jbd2_buffer_trigger_type ot_triggers;
int ot_offset;
@@ -801,14 +836,14 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
inode_lock = 1;
di = (struct ocfs2_dinode *)bh->b_data;
- if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
+ if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) {
mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
- inode->i_size);
+ i_size_read(inode));
status = -EINVAL;
goto done;
}
- trace_ocfs2_journal_init(inode->i_size,
+ trace_ocfs2_journal_init(i_size_read(inode),
(unsigned long long)inode->i_blocks,
OCFS2_I(inode)->ip_clusters);
@@ -1096,7 +1131,7 @@ static int ocfs2_force_read_journal(struct inode *inode)
memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
- num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
+ num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
v_blkno = 0;
while (v_blkno < num_blocks) {
status = ocfs2_extent_map_get_blocks(inode, v_blkno,
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 0a992737dca..0b479bab367 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -258,6 +258,17 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb,
int ocfs2_commit_trans(struct ocfs2_super *osb,
handle_t *handle);
int ocfs2_extend_trans(handle_t *handle, int nblocks);
+int ocfs2_allocate_extend_trans(handle_t *handle,
+ int thresh);
+
+/*
+ * Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction. For unbounded transactions such as
+ * fallocate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go.
+ */
+#define OCFS2_MAX_TRANS_DATA 64U
/*
* Create access is for when we get a newly created buffer and we're
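
ocfs2_allocate_extend_trans() adopts ext4's extend-or-restart idiom: ask jbd2 for more credits, and if the journal refuses to grow the running transaction (a positive return from jbd2_journal_extend()), commit what we have and restart the handle with a fresh budget. A hedged sketch of the control flow with stubbed journal calls (the stubs only approximate jbd2's tristate return convention; they are not the real API):

    #include <stdio.h>

    #define MAX_TRANS_DATA 64       /* mirrors OCFS2_MAX_TRANS_DATA */

    struct handle { int credits; };

    /* stub with jbd2_journal_extend()'s convention: 0 = extended in
     * place, >0 = cannot extend (caller must restart), <0 = hard error */
    static int journal_extend(struct handle *h, int nblocks)
    {
            if (h->credits + nblocks > 100)
                    return 1;       /* running transaction too large */
            h->credits += nblocks;
            return 0;
    }

    static int journal_restart(struct handle *h, int nblocks)
    {
            printf("committing %d credits, restarting handle\n", h->credits);
            h->credits = nblocks;   /* fresh credit budget */
            return 0;
    }

    static int allocate_extend_trans(struct handle *h)
    {
            int status = journal_extend(h, MAX_TRANS_DATA);

            if (status > 0)         /* could not grow: restart instead */
                    status = journal_restart(h, MAX_TRANS_DATA);
            return status;
    }

    int main(void)
    {
            struct handle h = { .credits = 80 };

            allocate_extend_trans(&h);      /* forces the restart path */
            printf("credits now %d\n", h.credits);
            return 0;
    }
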
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index aebeacd807c..cd5496b7a0a 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -1082,7 +1082,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
}
retry_enospc:
- (*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
+ (*ac)->ac_bits_wanted = osb->local_alloc_bits;
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
if (status == -ENOSPC) {
if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
@@ -1154,7 +1154,7 @@ retry_enospc:
OCFS2_LA_DISABLED)
goto bail;
- ac->ac_bits_wanted = osb->local_alloc_default_bits;
+ ac->ac_bits_wanted = osb->local_alloc_bits;
status = ocfs2_claim_clusters(handle, ac,
osb->local_alloc_bits,
&cluster_off,
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 452068b4574..3d3f3c83065 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -152,6 +152,7 @@ static int __ocfs2_move_extent(handle_t *handle,
}
out:
+ ocfs2_free_path(path);
return ret;
}
@@ -845,7 +846,7 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
struct ocfs2_move_extents *range = context->range;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- if ((inode->i_size == 0) || (range->me_len == 0))
+ if ((i_size_read(inode) == 0) || (range->me_len == 0))
return 0;
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 3b481f49063..1b60c62aa9d 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2579,6 +2579,8 @@ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
+
DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access);
DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 332a281f217..aaa50611ec6 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -234,7 +234,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
}
- if (gqinode->i_size < off + len) {
+ if (i_size_read(gqinode) < off + len) {
loff_t rounded_end =
ocfs2_align_bytes_to_blocks(sb, off + len);
@@ -778,8 +778,8 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
*/
WARN_ON(journal_current_handle());
status = ocfs2_extend_no_holes(gqinode, NULL,
- gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
- gqinode->i_size);
+ i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
+ i_size_read(gqinode));
if (status < 0)
goto out_dq;
}
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 27fe7ee4874..2e4344be3b9 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -982,14 +982,14 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
/* We are protected by dqio_sem so no locking needed */
status = ocfs2_extend_no_holes(lqinode, NULL,
- lqinode->i_size + 2 * sb->s_blocksize,
- lqinode->i_size);
+ i_size_read(lqinode) + 2 * sb->s_blocksize,
+ i_size_read(lqinode));
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
- lqinode->i_size + 2 * sb->s_blocksize);
+ i_size_read(lqinode) + 2 * sb->s_blocksize);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1125,14 +1125,14 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
/* We are protected by dqio_sem so no locking needed */
status = ocfs2_extend_no_holes(lqinode, NULL,
- lqinode->i_size + sb->s_blocksize,
- lqinode->i_size);
+ i_size_read(lqinode) + sb->s_blocksize,
+ i_size_read(lqinode));
if (status < 0) {
mlog_errno(status);
goto out;
}
status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh,
- lqinode->i_size + sb->s_blocksize);
+ i_size_read(lqinode) + sb->s_blocksize);
if (status < 0) {
mlog_errno(status);
goto out;
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a70d604593b..bf4dfc14bb2 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3854,7 +3854,10 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
while (cpos < clusters) {
ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
&num_clusters, &ext_flags);
-
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
ret = ocfs2_add_refcount_flag(inode, &di_et,
&ref_tree->rf_ci,
@@ -4025,7 +4028,10 @@ static int ocfs2_duplicate_extent_list(struct inode *s_inode,
while (cpos < clusters) {
ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
&num_clusters, &ext_flags);
-
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
if (p_cluster) {
ret = ocfs2_add_refcounted_extent(t_inode, &et,
ref_ci, ref_root_bh,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 317ef0abccb..6ce0686eab7 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3505,7 +3505,7 @@ int ocfs2_xattr_set(struct inode *inode,
int ret, credits, ref_meta = 0, ref_credits = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
- struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
+ struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_xattr_info xi = {
@@ -3609,13 +3609,14 @@ int ocfs2_xattr_set(struct inode *inode,
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
- goto cleanup;
+ goto out_free_ac;
}
ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
ocfs2_commit_trans(osb, ctxt.handle);
+out_free_ac:
if (ctxt.data_ac)
ocfs2_free_alloc_context(ctxt.data_ac);
if (ctxt.meta_ac)
@@ -5881,6 +5882,10 @@ static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
&num_clusters, el, &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
cpos += num_clusters;
if ((ext_flags & OCFS2_EXT_REFCOUNTED))
@@ -6797,7 +6802,7 @@ out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
- meta_ac = NULL;
+ *meta_ac = NULL;
}
}
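
The one-character fix at the end of the xattr hunks is the classic double-free guard: the function frees *meta_ac on error but was nulling its own local meta_ac instead of the caller's pointer, leaving the caller holding a dangling allocation context. The pattern, reduced to a stub:

    #include <stdio.h>
    #include <stdlib.h>

    /* on failure, free the context AND null the caller's pointer so the
     * caller cannot free it again; note the '*' -- writing 'ctx = NULL'
     * only clears the local parameter, which is the bug fixed above */
    static int init_context(int **ctx, int fail)
    {
            *ctx = malloc(sizeof(**ctx));
            if (!*ctx)
                    return -1;
            if (fail) {
                    free(*ctx);
                    *ctx = NULL;    /* not: ctx = NULL */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            int *ctx;

            if (init_context(&ctx, 1) < 0 && !ctx)
                    puts("error path left no dangling pointer");
            free(ctx);      /* free(NULL) is a harmless no-op */
            return 0;
    }
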
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 0ff80f9b930..985ea881b5b 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -286,7 +286,7 @@ int proc_fd_permission(struct inode *inode, int mask)
int rv = generic_permission(inode, mask);
if (rv == 0)
return 0;
- if (task_pid(current) == proc_pid(inode))
+ if (task_tgid(current) == proc_pid(inode))
rv = 0;
return rv;
}
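
The /proc fix swaps task_pid() for task_tgid(): /proc/<pid>/fd belongs to the whole thread group, so any thread of the process should pass the ownership check, not just the thread whose own pid happens to equal the inode's. A userspace probe of this path (build with gcc -pthread); the check only bites after generic_permission() has already failed, so treat this as an illustration of the code path rather than a guaranteed reproducer:

    #include <stdio.h>
    #include <pthread.h>
    #include <dirent.h>

    /* runs in a thread whose tid != tgid; /proc/self resolves to the
     * tgid, so the open succeeds for every thread only when the kernel
     * compares thread-group ids, as this patch makes it do */
    static void *list_own_fds(void *arg)
    {
            DIR *d = opendir("/proc/self/fd");
            struct dirent *e;

            (void)arg;
            if (!d) {
                    perror("opendir");
                    return NULL;
            }
            while ((e = readdir(d)))
                    printf("fd entry: %s\n", e->d_name);
            closedir(d);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, list_own_fds, NULL);
            pthread_join(t, NULL);
            return 0;
    }
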
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 107d026f5d6..7366e9d63ce 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -740,6 +740,9 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
ptent = pte_file_clear_soft_dirty(ptent);
}
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ vma->vm_flags &= ~VM_SOFTDIRTY;
+
set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}
@@ -949,13 +952,15 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
if (is_migration_entry(entry))
page = migration_entry_to_page(entry);
} else {
- *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ flags2 |= __PM_SOFT_DIRTY;
+ *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
return;
}
if (page && !PageAnon(page))
flags |= PM_FILE;
- if (pte_soft_dirty(pte))
+ if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
flags2 |= __PM_SOFT_DIRTY;
*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
@@ -974,7 +979,7 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *p
*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
else
- *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
@@ -997,7 +1002,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
int pmd_flags2;
- pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0);
+ if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
+ pmd_flags2 = __PM_SOFT_DIRTY;
+ else
+ pmd_flags2 = 0;
+
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
@@ -1015,12 +1024,17 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (pmd_trans_unstable(pmd))
return 0;
for (; addr != end; addr += PAGE_SIZE) {
+ int flags2;
/* check to see if we've left 'vma' behind
* and need a new, higher one */
if (vma && (addr >= vma->vm_end)) {
vma = find_vma(walk->mm, addr);
- pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+ flags2 = __PM_SOFT_DIRTY;
+ else
+ flags2 = 0;
+ pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
}
/* check that 'vma' actually covers this address,
@@ -1044,13 +1058,15 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
- pte_t pte, int offset)
+ pte_t pte, int offset, int flags2)
{
if (pte_present(pte))
- *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
- | PM_STATUS2(pm->v2, 0) | PM_PRESENT);
+ *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
+ PM_STATUS2(pm->v2, flags2) |
+ PM_PRESENT);
else
- *pme = make_pme(PM_NOT_PRESENT(pm->v2));
+ *pme = make_pme(PM_NOT_PRESENT(pm->v2) |
+ PM_STATUS2(pm->v2, flags2));
}
/* This function walks within one hugetlb entry in the single call */
@@ -1059,12 +1075,22 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
+ struct vm_area_struct *vma;
int err = 0;
+ int flags2;
pagemap_entry_t pme;
+ vma = find_vma(walk->mm, addr);
+ WARN_ON_ONCE(!vma);
+
+ if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+ flags2 = __PM_SOFT_DIRTY;
+ else
+ flags2 = 0;
+
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
- huge_pte_to_pagemap_entry(&pme, pm, *pte, offset);
+ huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
@@ -1376,8 +1402,10 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
walk.mm = mm;
pol = get_vma_policy(task, vma, vma->vm_start);
- mpol_to_str(buffer, sizeof(buffer), pol);
+ n = mpol_to_str(buffer, sizeof(buffer), pol);
mpol_cond_put(pol);
+ if (n < 0)
+ return n;
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
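
The task_mmu.c hunks propagate the per-VMA VM_SOFTDIRTY flag into pagemap entries, so pages in freshly created or not-yet-faulted VMAs still report as soft-dirty. Userspace consumes this through /proc/pid/clear_refs and /proc/pid/pagemap; a minimal checker for one address (bit 55 of a pagemap entry is the soft-dirty bit, per Documentation/vm/soft-dirty.txt):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* returns 1 if the page backing 'addr' is soft-dirty, 0 if clean,
     * -1 on error */
    static int soft_dirty(void *addr)
    {
            uint64_t ent = 0;
            FILE *f = fopen("/proc/self/pagemap", "rb");

            if (!f)
                    return -1;
            /* one 64-bit entry per virtual page */
            fseek(f, (uintptr_t)addr / sysconf(_SC_PAGESIZE) * 8, SEEK_SET);
            if (fread(&ent, sizeof(ent), 1, f) != 1)
                    ent = 0;
            fclose(f);
            return (int)((ent >> 55) & 1);
    }

    int main(void)
    {
            char *p = malloc(4096);

            *p = 1; /* fault the page in, marking it soft-dirty */
            printf("soft-dirty: %d\n", soft_dirty(p));
            /* echoing "4" into /proc/self/clear_refs resets the bits */
            free(p);
            return 0;
    }
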
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index a1a16eb97c7..9100d695988 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -21,6 +21,7 @@
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"
@@ -123,11 +124,65 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
return read;
}
+/*
+ * Architectures may override this function to allocate ELF header in 2nd kernel
+ */
+int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
+{
+ return 0;
+}
+
+/*
+ * Architectures may override this function to free header
+ */
+void __weak elfcorehdr_free(unsigned long long addr)
+{}
+
+/*
+ * Architectures may override this function to read from ELF header
+ */
+ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+ return read_from_oldmem(buf, count, ppos, 0);
+}
+
+/*
+ * Architectures may override this function to read from notes sections
+ */
+ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+{
+ return read_from_oldmem(buf, count, ppos, 0);
+}
+
+/*
+ * Architectures may override this function to map oldmem
+ */
+int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, from, pfn, size, prot);
+}
+
+/*
+ * Copy to either kernel or user space
+ */
+static int copy_to(void *target, void *src, size_t size, int userbuf)
+{
+ if (userbuf) {
+ if (copy_to_user((char __user *) target, src, size))
+ return -EFAULT;
+ } else {
+ memcpy(target, src, size);
+ }
+ return 0;
+}
+
/* Read from the ELF header and then the crash dump. On error, negative value is
* returned otherwise number of bytes read are returned.
*/
-static ssize_t read_vmcore(struct file *file, char __user *buffer,
- size_t buflen, loff_t *fpos)
+static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
+ int userbuf)
{
ssize_t acc = 0, tmp;
size_t tsz;
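
The block of __weak functions above gives vmcore overridable defaults: an architecture (s390 in this series) links in strong definitions and the weak generic ones silently drop away, while everyone else keeps the read_from_oldmem() behaviour with no #ifdef. The linker mechanism itself, outside the kernel:

    #include <stdio.h>

    /* weak default: a strong definition of the same symbol in another
     * object file silently replaces this one at link time */
    __attribute__((weak)) int elfcorehdr_alloc_demo(void)
    {
            puts("generic default (weak)");
            return 0;
    }

    /* an "arch" object would supply the override:
     *   int elfcorehdr_alloc_demo(void) { ... }   (strong, wins the link)
     */

    int main(void)
    {
            return elfcorehdr_alloc_demo();
    }
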
@@ -144,7 +199,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
/* Read ELF core header */
if (*fpos < elfcorebuf_sz) {
tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
- if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
+ if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
return -EFAULT;
buflen -= tsz;
*fpos += tsz;
@@ -162,7 +217,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
- if (copy_to_user(buffer, kaddr, tsz))
+ if (copy_to(buffer, kaddr, tsz, userbuf))
return -EFAULT;
buflen -= tsz;
*fpos += tsz;
@@ -178,7 +233,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
if (*fpos < m->offset + m->size) {
tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
start = m->paddr + *fpos - m->offset;
- tmp = read_from_oldmem(buffer, tsz, &start, 1);
+ tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
if (tmp < 0)
return tmp;
buflen -= tsz;
@@ -195,6 +250,55 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
return acc;
}
+static ssize_t read_vmcore(struct file *file, char __user *buffer,
+ size_t buflen, loff_t *fpos)
+{
+ return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+}
+
+/*
+ * The vmcore fault handler uses the page cache and fills data using the
+ * standard __vmcore_read() function.
+ *
+ * On s390 the fault handler is used for memory regions that can't be mapped
+ * directly with remap_pfn_range().
+ */
+static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#ifdef CONFIG_S390
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t index = vmf->pgoff;
+ struct page *page;
+ loff_t offset;
+ char *buf;
+ int rc;
+
+ page = find_or_create_page(mapping, index, GFP_KERNEL);
+ if (!page)
+ return VM_FAULT_OOM;
+ if (!PageUptodate(page)) {
+ offset = (loff_t) index << PAGE_CACHE_SHIFT;
+ buf = __va((page_to_pfn(page) << PAGE_SHIFT));
+ rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
+ if (rc < 0) {
+ unlock_page(page);
+ page_cache_release(page);
+ return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ }
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ vmf->page = page;
+ return 0;
+#else
+ return VM_FAULT_SIGBUS;
+#endif
+}
+
+static const struct vm_operations_struct vmcore_mmap_ops = {
+ .fault = mmap_vmcore_fault,
+};
+
/**
* alloc_elfnotes_buf - allocate buffer for ELF note segment in
* vmalloc memory
@@ -223,7 +327,7 @@ static inline char *alloc_elfnotes_buf(size_t notes_sz)
* regions in the 1st kernel pointed to by PT_LOAD entries) into
* virtually contiguous user-space in ELF layout.
*/
-#if defined(CONFIG_MMU) && !defined(CONFIG_S390)
+#ifdef CONFIG_MMU
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
@@ -241,6 +345,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_ops = &vmcore_mmap_ops;
len = 0;
@@ -282,9 +387,9 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
tsz = min_t(size_t, m->offset + m->size - start, size);
paddr = m->paddr + start - m->offset;
- if (remap_pfn_range(vma, vma->vm_start + len,
- paddr >> PAGE_SHIFT, tsz,
- vma->vm_page_prot))
+ if (remap_oldmem_pfn_range(vma, vma->vm_start + len,
+ paddr >> PAGE_SHIFT, tsz,
+ vma->vm_page_prot))
goto fail;
size -= tsz;
start += tsz;
@@ -357,7 +462,7 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
notes_section = kmalloc(max_sz, GFP_KERNEL);
if (!notes_section)
return -ENOMEM;
- rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
+ rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
if (rc < 0) {
kfree(notes_section);
return rc;
@@ -444,7 +549,8 @@ static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
if (phdr_ptr->p_type != PT_NOTE)
continue;
offset = phdr_ptr->p_offset;
- rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
+ rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
+ &offset);
if (rc < 0)
return rc;
notes_buf += phdr_ptr->p_memsz;
@@ -536,7 +642,7 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
notes_section = kmalloc(max_sz, GFP_KERNEL);
if (!notes_section)
return -ENOMEM;
- rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
+ rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
if (rc < 0) {
kfree(notes_section);
return rc;
@@ -623,7 +729,8 @@ static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
if (phdr_ptr->p_type != PT_NOTE)
continue;
offset = phdr_ptr->p_offset;
- rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
+ rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
+ &offset);
if (rc < 0)
return rc;
notes_buf += phdr_ptr->p_memsz;
@@ -810,7 +917,7 @@ static int __init parse_crash_elf64_headers(void)
addr = elfcorehdr_addr;
/* Read Elf header */
- rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
+ rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
if (rc < 0)
return rc;
@@ -837,7 +944,7 @@ static int __init parse_crash_elf64_headers(void)
if (!elfcorebuf)
return -ENOMEM;
addr = elfcorehdr_addr;
- rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+ rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
if (rc < 0)
goto fail;
@@ -866,7 +973,7 @@ static int __init parse_crash_elf32_headers(void)
addr = elfcorehdr_addr;
/* Read Elf header */
- rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
+ rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
if (rc < 0)
return rc;
@@ -892,7 +999,7 @@ static int __init parse_crash_elf32_headers(void)
if (!elfcorebuf)
return -ENOMEM;
addr = elfcorehdr_addr;
- rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
+ rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
if (rc < 0)
goto fail;
@@ -919,7 +1026,7 @@ static int __init parse_crash_elf_headers(void)
int rc=0;
addr = elfcorehdr_addr;
- rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
+ rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
if (rc < 0)
return rc;
if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
@@ -952,7 +1059,14 @@ static int __init vmcore_init(void)
{
int rc = 0;
- /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
+ /* Allow architectures to allocate ELF header in 2nd kernel */
+ rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
+ if (rc)
+ return rc;
+ /*
+ * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
+ * then capture the dump.
+ */
if (!(is_vmcore_usable()))
return rc;
rc = parse_crash_elf_headers();
@@ -960,6 +1074,8 @@ static int __init vmcore_init(void)
pr_warn("Kdump: vmcore not initialized\n");
return rc;
}
+ elfcorehdr_free(elfcorehdr_addr);
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
if (proc_vmcore)
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index c24f1e10b94..39d14659a8d 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -244,12 +244,6 @@ struct dentry *ramfs_mount(struct file_system_type *fs_type,
return mount_nodev(fs_type, flags, data, ramfs_fill_super);
}
-static struct dentry *rootfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
-}
-
static void ramfs_kill_sb(struct super_block *sb)
{
kfree(sb->s_fs_info);
@@ -262,29 +256,23 @@ static struct file_system_type ramfs_fs_type = {
.kill_sb = ramfs_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
-static struct file_system_type rootfs_fs_type = {
- .name = "rootfs",
- .mount = rootfs_mount,
- .kill_sb = kill_litter_super,
-};
-static int __init init_ramfs_fs(void)
-{
- return register_filesystem(&ramfs_fs_type);
-}
-module_init(init_ramfs_fs)
-
-int __init init_rootfs(void)
+int __init init_ramfs_fs(void)
{
+ static unsigned long once;
int err;
+ if (test_and_set_bit(0, &once))
+ return 0;
+
err = bdi_init(&ramfs_backing_dev_info);
if (err)
return err;
- err = register_filesystem(&rootfs_fs_type);
+ err = register_filesystem(&ramfs_fs_type);
if (err)
bdi_destroy(&ramfs_backing_dev_info);
return err;
}
+module_init(init_ramfs_fs)
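
init_ramfs_fs() can now be reached more than once -- via module_init and again by the rootfs setup that this series moves out of the file -- so it guards itself with test_and_set_bit(): the first caller does the work, every later one returns success immediately. The same once-guard idiom with C11 atomics:

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_flag once = ATOMIC_FLAG_INIT;

    static int init_ramfs_demo(void)
    {
            /* first caller sees the flag clear and does the work;
             * every later caller returns success immediately */
            if (atomic_flag_test_and_set(&once))
                    return 0;
            puts("registering filesystem (runs exactly once)");
            return 0;
    }

    int main(void)
    {
            init_ramfs_demo();
            init_ramfs_demo();      /* no-op: already initialized */
            return 0;
    }
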
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index fb50652e4e1..41d108ecc9b 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -167,17 +167,14 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
/*
* Block is uncompressed.
*/
- int i, in, pg_offset = 0;
-
- for (i = 0; i < b; i++) {
- wait_on_buffer(bh[i]);
- if (!buffer_uptodate(bh[i]))
- goto block_release;
- }
+ int in, pg_offset = 0;
for (bytes = length; k < b; k++) {
in = min(bytes, msblk->devblksize - offset);
bytes -= in;
+ wait_on_buffer(bh[k]);
+ if (!buffer_uptodate(bh[k]))
+ goto block_release;
while (in) {
if (pg_offset == PAGE_CACHE_SIZE) {
page++;
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index f7f527bf8c1..d8c2d747be2 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -54,6 +54,7 @@ static int get_dir_index_using_offset(struct super_block *sb,
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int err, i, index, length = 0;
+ unsigned int size;
struct squashfs_dir_index dir_index;
TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
@@ -81,8 +82,14 @@ static int get_dir_index_using_offset(struct super_block *sb,
*/
break;
+ size = le32_to_cpu(dir_index.size) + 1;
+
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ break;
+
err = squashfs_read_metadata(sb, NULL, &index_start,
- &index_offset, le32_to_cpu(dir_index.size) + 1);
+ &index_offset, size);
if (err < 0)
break;
@@ -105,9 +112,8 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
u64 block = squashfs_i(inode)->start + msblk->directory_table;
- int offset = squashfs_i(inode)->offset, length, dir_count, size,
- type, err;
- unsigned int inode_number;
+ int offset = squashfs_i(inode)->offset, length, err;
+ unsigned int inode_number, dir_count, size, type;
struct squashfs_dir_header dirh;
struct squashfs_dir_entry *dire;
@@ -200,6 +206,9 @@ static int squashfs_readdir(struct file *file, struct dir_context *ctx)
((short) le16_to_cpu(dire->inode_number));
type = le16_to_cpu(dire->type);
+ if (type > SQUASHFS_MAX_DIR_TYPE)
+ goto failed_read;
+
if (!dir_emit(ctx, dire->name, size,
inode_number,
squashfs_filetype_table[type]))
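
The squashfs hunks (here and in namei.c below) all enforce one rule: a length or type field read from the on-disk image is attacker-controlled, so clamp it against SQUASHFS_NAME_LEN or SQUASHFS_MAX_DIR_TYPE (and widen the locals to unsigned int) before using it as a read size or table index. The validation shape, with a made-up limit standing in for the squashfs constant:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define NAME_LEN_MAX 228        /* stand-in for SQUASHFS_NAME_LEN */

    /* 'disk' simulates untrusted metadata: a little-endian length field
     * followed by name bytes; reject sizes the format cannot produce */
    static int read_name(const uint8_t *disk, char *out, size_t outsz)
    {
            uint32_t raw = disk[0] | disk[1] << 8 | disk[2] << 16 |
                           (uint32_t)disk[3] << 24;
            uint64_t size = (uint64_t)raw + 1;  /* stored as length - 1 */

            if (size > NAME_LEN_MAX || size >= outsz)
                    return -1;      /* corrupt image: bail before copying */
            memcpy(out, disk + 4, size);
            out[size] = '\0';
            return 0;
    }

    int main(void)
    {
            uint8_t good[8] = { 3, 0, 0, 0, 'd', 'a', 't', 'a' };
            uint8_t evil[4] = { 0xff, 0xff, 0xff, 0xff };   /* huge size */
            char name[NAME_LEN_MAX + 1];

            if (!read_name(good, name, sizeof(name)))
                    printf("name: %s\n", name);
            printf("evil image rejected: %d\n",
                   read_name(evil, name, sizeof(name)));
            return 0;
    }
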
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 7834a517f7f..67cad77fefb 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -79,7 +79,8 @@ static int get_dir_index_using_name(struct super_block *sb,
int len)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- int i, size, length = 0, err;
+ int i, length = 0, err;
+ unsigned int size;
struct squashfs_dir_index *index;
char *str;
@@ -103,6 +104,8 @@ static int get_dir_index_using_name(struct super_block *sb,
size = le32_to_cpu(index->size) + 1;
+ if (size > SQUASHFS_NAME_LEN)
+ break;
err = squashfs_read_metadata(sb, index->name, &index_start,
&index_offset, size);
@@ -144,7 +147,8 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
struct squashfs_dir_entry *dire;
u64 block = squashfs_i(dir)->start + msblk->directory_table;
int offset = squashfs_i(dir)->offset;
- int err, length, dir_count, size;
+ int err, length;
+ unsigned int dir_count, size;
TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 9e2349d07cb..4b2beda4949 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -87,7 +87,7 @@
#define SQUASHFS_COMP_OPTS(flags) SQUASHFS_BIT(flags, \
SQUASHFS_COMP_OPT)
-/* Max number of types and file types */
+/* Inode types including extended types */
#define SQUASHFS_DIR_TYPE 1
#define SQUASHFS_REG_TYPE 2
#define SQUASHFS_SYMLINK_TYPE 3
@@ -103,6 +103,9 @@
#define SQUASHFS_LFIFO_TYPE 13
#define SQUASHFS_LSOCKET_TYPE 14
+/* Max type value stored in directory entry */
+#define SQUASHFS_MAX_DIR_TYPE 7
+
/* Xattr types */
#define SQUASHFS_XATTR_USER 0
#define SQUASHFS_XATTR_TRUSTED 1
diff --git a/fs/super.c b/fs/super.c
index 5536a95186e..f6961ea84c5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,7 +71,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
if (!grab_super_passive(sb))
return -1;
- if (sb->s_op && sb->s_op->nr_cached_objects)
+ if (sb->s_op->nr_cached_objects)
fs_objects = sb->s_op->nr_cached_objects(sb);
total_objects = sb->s_nr_dentry_unused +
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 63d609d8a3f..3abfa6ea226 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -26,6 +26,7 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
+#include <drm/i915_pciids.h>
#include <uapi/drm/i915_drm.h>
/* For use by IPS driver */
@@ -34,4 +35,37 @@ extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+
+#define I830_GMCH_CTRL 0x52
+
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 00000000000..8a10f5c354e
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ * __u32 vendor, device;
+ * __u32 subvendor, subdevice;
+ * __u32 class, class_mask;
+ * kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 here because "class" is reserved and we want to
+ * give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) { \
+ 0x8086, id, \
+ ~0, ~0, \
+ 0x030000, 0xff0000, \
+ (unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) { \
+ 0x8086, 0x16a, \
+ 0x152d, 0x8990, \
+ 0x030000, 0xff0000, \
+ (unsigned long) info }
+
+#define INTEL_I830_IDS(info) \
+ INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info) \
+ INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+ INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+ INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
+
+#define INTEL_I915GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+ INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
+
+#define INTEL_I965G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
+ INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
+ INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
+ INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
+
+#define INTEL_G33_IDS(info) \
+ INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+ INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
+ INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
+
+#define INTEL_I965GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
+ INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
+
+#define INTEL_GM45_IDS(info) \
+ INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info) \
+ INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+ INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+ INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+ INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+ INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+ INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info) \
+ INTEL_VGA_DEVICE(0xa001, info), \
+ INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0102, info), \
+ INTEL_VGA_DEVICE(0x0112, info), \
+ INTEL_VGA_DEVICE(0x0122, info), \
+ INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0106, info), \
+ INTEL_VGA_DEVICE(0x0116, info), \
+ INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+ INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+ INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+ INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+ INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+ INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+ INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+ INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+ INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+ INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+ INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+ INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+ INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0f30, info), \
+ INTEL_VGA_DEVICE(0x0f31, info), \
+ INTEL_VGA_DEVICE(0x0f32, info), \
+ INTEL_VGA_DEVICE(0x0f33, info), \
+ INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0155, info)
+
+#endif /* _I915_PCIIDS_H */
diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h
new file mode 100644
index 00000000000..042e7b3b629
--- /dev/null
+++ b/include/dt-bindings/input/input.h
@@ -0,0 +1,525 @@
+/*
+ * This header provides constants for most input bindings.
+ *
+ * Most input bindings include key code, matrix key code format.
+ * In most cases, key code and matrix key code format uses
+ * the standard values/macro defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_INPUT_INPUT_H
+#define _DT_BINDINGS_INPUT_INPUT_H
+
+#define KEY_RESERVED 0
+#define KEY_ESC 1
+#define KEY_1 2
+#define KEY_2 3
+#define KEY_3 4
+#define KEY_4 5
+#define KEY_5 6
+#define KEY_6 7
+#define KEY_7 8
+#define KEY_8 9
+#define KEY_9 10
+#define KEY_0 11
+#define KEY_MINUS 12
+#define KEY_EQUAL 13
+#define KEY_BACKSPACE 14
+#define KEY_TAB 15
+#define KEY_Q 16
+#define KEY_W 17
+#define KEY_E 18
+#define KEY_R 19
+#define KEY_T 20
+#define KEY_Y 21
+#define KEY_U 22
+#define KEY_I 23
+#define KEY_O 24
+#define KEY_P 25
+#define KEY_LEFTBRACE 26
+#define KEY_RIGHTBRACE 27
+#define KEY_ENTER 28
+#define KEY_LEFTCTRL 29
+#define KEY_A 30
+#define KEY_S 31
+#define KEY_D 32
+#define KEY_F 33
+#define KEY_G 34
+#define KEY_H 35
+#define KEY_J 36
+#define KEY_K 37
+#define KEY_L 38
+#define KEY_SEMICOLON 39
+#define KEY_APOSTROPHE 40
+#define KEY_GRAVE 41
+#define KEY_LEFTSHIFT 42
+#define KEY_BACKSLASH 43
+#define KEY_Z 44
+#define KEY_X 45
+#define KEY_C 46
+#define KEY_V 47
+#define KEY_B 48
+#define KEY_N 49
+#define KEY_M 50
+#define KEY_COMMA 51
+#define KEY_DOT 52
+#define KEY_SLASH 53
+#define KEY_RIGHTSHIFT 54
+#define KEY_KPASTERISK 55
+#define KEY_LEFTALT 56
+#define KEY_SPACE 57
+#define KEY_CAPSLOCK 58
+#define KEY_F1 59
+#define KEY_F2 60
+#define KEY_F3 61
+#define KEY_F4 62
+#define KEY_F5 63
+#define KEY_F6 64
+#define KEY_F7 65
+#define KEY_F8 66
+#define KEY_F9 67
+#define KEY_F10 68
+#define KEY_NUMLOCK 69
+#define KEY_SCROLLLOCK 70
+#define KEY_KP7 71
+#define KEY_KP8 72
+#define KEY_KP9 73
+#define KEY_KPMINUS 74
+#define KEY_KP4 75
+#define KEY_KP5 76
+#define KEY_KP6 77
+#define KEY_KPPLUS 78
+#define KEY_KP1 79
+#define KEY_KP2 80
+#define KEY_KP3 81
+#define KEY_KP0 82
+#define KEY_KPDOT 83
+
+#define KEY_ZENKAKUHANKAKU 85
+#define KEY_102ND 86
+#define KEY_F11 87
+#define KEY_F12 88
+#define KEY_RO 89
+#define KEY_KATAKANA 90
+#define KEY_HIRAGANA 91
+#define KEY_HENKAN 92
+#define KEY_KATAKANAHIRAGANA 93
+#define KEY_MUHENKAN 94
+#define KEY_KPJPCOMMA 95
+#define KEY_KPENTER 96
+#define KEY_RIGHTCTRL 97
+#define KEY_KPSLASH 98
+#define KEY_SYSRQ 99
+#define KEY_RIGHTALT 100
+#define KEY_LINEFEED 101
+#define KEY_HOME 102
+#define KEY_UP 103
+#define KEY_PAGEUP 104
+#define KEY_LEFT 105
+#define KEY_RIGHT 106
+#define KEY_END 107
+#define KEY_DOWN 108
+#define KEY_PAGEDOWN 109
+#define KEY_INSERT 110
+#define KEY_DELETE 111
+#define KEY_MACRO 112
+#define KEY_MUTE 113
+#define KEY_VOLUMEDOWN 114
+#define KEY_VOLUMEUP 115
+#define KEY_POWER 116 /* SC System Power Down */
+#define KEY_KPEQUAL 117
+#define KEY_KPPLUSMINUS 118
+#define KEY_PAUSE 119
+#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */
+
+#define KEY_KPCOMMA 121
+#define KEY_HANGEUL 122
+#define KEY_HANGUEL KEY_HANGEUL
+#define KEY_HANJA 123
+#define KEY_YEN 124
+#define KEY_LEFTMETA 125
+#define KEY_RIGHTMETA 126
+#define KEY_COMPOSE 127
+
+#define KEY_STOP 128 /* AC Stop */
+#define KEY_AGAIN 129
+#define KEY_PROPS 130 /* AC Properties */
+#define KEY_UNDO 131 /* AC Undo */
+#define KEY_FRONT 132
+#define KEY_COPY 133 /* AC Copy */
+#define KEY_OPEN 134 /* AC Open */
+#define KEY_PASTE 135 /* AC Paste */
+#define KEY_FIND 136 /* AC Search */
+#define KEY_CUT 137 /* AC Cut */
+#define KEY_HELP 138 /* AL Integrated Help Center */
+#define KEY_MENU 139 /* Menu (show menu) */
+#define KEY_CALC 140 /* AL Calculator */
+#define KEY_SETUP 141
+#define KEY_SLEEP 142 /* SC System Sleep */
+#define KEY_WAKEUP 143 /* System Wake Up */
+#define KEY_FILE 144 /* AL Local Machine Browser */
+#define KEY_SENDFILE 145
+#define KEY_DELETEFILE 146
+#define KEY_XFER 147
+#define KEY_PROG1 148
+#define KEY_PROG2 149
+#define KEY_WWW 150 /* AL Internet Browser */
+#define KEY_MSDOS 151
+#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */
+#define KEY_SCREENLOCK KEY_COFFEE
+#define KEY_DIRECTION 153
+#define KEY_CYCLEWINDOWS 154
+#define KEY_MAIL 155
+#define KEY_BOOKMARKS 156 /* AC Bookmarks */
+#define KEY_COMPUTER 157
+#define KEY_BACK 158 /* AC Back */
+#define KEY_FORWARD 159 /* AC Forward */
+#define KEY_CLOSECD 160
+#define KEY_EJECTCD 161
+#define KEY_EJECTCLOSECD 162
+#define KEY_NEXTSONG 163
+#define KEY_PLAYPAUSE 164
+#define KEY_PREVIOUSSONG 165
+#define KEY_STOPCD 166
+#define KEY_RECORD 167
+#define KEY_REWIND 168
+#define KEY_PHONE 169 /* Media Select Telephone */
+#define KEY_ISO 170
+#define KEY_CONFIG 171 /* AL Consumer Control Configuration */
+#define KEY_HOMEPAGE 172 /* AC Home */
+#define KEY_REFRESH 173 /* AC Refresh */
+#define KEY_EXIT 174 /* AC Exit */
+#define KEY_MOVE 175
+#define KEY_EDIT 176
+#define KEY_SCROLLUP 177
+#define KEY_SCROLLDOWN 178
+#define KEY_KPLEFTPAREN 179
+#define KEY_KPRIGHTPAREN 180
+#define KEY_NEW 181 /* AC New */
+#define KEY_REDO 182 /* AC Redo/Repeat */
+
+#define KEY_F13 183
+#define KEY_F14 184
+#define KEY_F15 185
+#define KEY_F16 186
+#define KEY_F17 187
+#define KEY_F18 188
+#define KEY_F19 189
+#define KEY_F20 190
+#define KEY_F21 191
+#define KEY_F22 192
+#define KEY_F23 193
+#define KEY_F24 194
+
+#define KEY_PLAYCD 200
+#define KEY_PAUSECD 201
+#define KEY_PROG3 202
+#define KEY_PROG4 203
+#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_SUSPEND 205
+#define KEY_CLOSE 206 /* AC Close */
+#define KEY_PLAY 207
+#define KEY_FASTFORWARD 208
+#define KEY_BASSBOOST 209
+#define KEY_PRINT 210 /* AC Print */
+#define KEY_HP 211
+#define KEY_CAMERA 212
+#define KEY_SOUND 213
+#define KEY_QUESTION 214
+#define KEY_EMAIL 215
+#define KEY_CHAT 216
+#define KEY_SEARCH 217
+#define KEY_CONNECT 218
+#define KEY_FINANCE 219 /* AL Checkbook/Finance */
+#define KEY_SPORT 220
+#define KEY_SHOP 221
+#define KEY_ALTERASE 222
+#define KEY_CANCEL 223 /* AC Cancel */
+#define KEY_BRIGHTNESSDOWN 224
+#define KEY_BRIGHTNESSUP 225
+#define KEY_MEDIA 226
+
+#define KEY_SWITCHVIDEOMODE 227 /* Cycle between available video
+ outputs (Monitor/LCD/TV-out/etc) */
+#define KEY_KBDILLUMTOGGLE 228
+#define KEY_KBDILLUMDOWN 229
+#define KEY_KBDILLUMUP 230
+
+#define KEY_SEND 231 /* AC Send */
+#define KEY_REPLY 232 /* AC Reply */
+#define KEY_FORWARDMAIL 233 /* AC Forward Msg */
+#define KEY_SAVE 234 /* AC Save */
+#define KEY_DOCUMENTS 235
+
+#define KEY_BATTERY 236
+
+#define KEY_BLUETOOTH 237
+#define KEY_WLAN 238
+#define KEY_UWB 239
+
+#define KEY_UNKNOWN 240
+
+#define KEY_VIDEO_NEXT 241 /* drive next video source */
+#define KEY_VIDEO_PREV 242 /* drive previous video source */
+#define KEY_BRIGHTNESS_CYCLE 243 /* brightness up, after max is min */
+#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */
+#define KEY_DISPLAY_OFF 245 /* display device to off state */
+
+#define KEY_WIMAX 246
+#define KEY_RFKILL 247 /* Key that controls all radios */
+
+#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
+
+/* Code 255 is reserved for special needs of AT keyboard driver */
+
+#define BTN_MISC 0x100
+#define BTN_0 0x100
+#define BTN_1 0x101
+#define BTN_2 0x102
+#define BTN_3 0x103
+#define BTN_4 0x104
+#define BTN_5 0x105
+#define BTN_6 0x106
+#define BTN_7 0x107
+#define BTN_8 0x108
+#define BTN_9 0x109
+
+#define BTN_MOUSE 0x110
+#define BTN_LEFT 0x110
+#define BTN_RIGHT 0x111
+#define BTN_MIDDLE 0x112
+#define BTN_SIDE 0x113
+#define BTN_EXTRA 0x114
+#define BTN_FORWARD 0x115
+#define BTN_BACK 0x116
+#define BTN_TASK 0x117
+
+#define BTN_JOYSTICK 0x120
+#define BTN_TRIGGER 0x120
+#define BTN_THUMB 0x121
+#define BTN_THUMB2 0x122
+#define BTN_TOP 0x123
+#define BTN_TOP2 0x124
+#define BTN_PINKIE 0x125
+#define BTN_BASE 0x126
+#define BTN_BASE2 0x127
+#define BTN_BASE3 0x128
+#define BTN_BASE4 0x129
+#define BTN_BASE5 0x12a
+#define BTN_BASE6 0x12b
+#define BTN_DEAD 0x12f
+
+#define BTN_GAMEPAD 0x130
+#define BTN_SOUTH 0x130
+#define BTN_A BTN_SOUTH
+#define BTN_EAST 0x131
+#define BTN_B BTN_EAST
+#define BTN_C 0x132
+#define BTN_NORTH 0x133
+#define BTN_X BTN_NORTH
+#define BTN_WEST 0x134
+#define BTN_Y BTN_WEST
+#define BTN_Z 0x135
+#define BTN_TL 0x136
+#define BTN_TR 0x137
+#define BTN_TL2 0x138
+#define BTN_TR2 0x139
+#define BTN_SELECT 0x13a
+#define BTN_START 0x13b
+#define BTN_MODE 0x13c
+#define BTN_THUMBL 0x13d
+#define BTN_THUMBR 0x13e
+
+#define BTN_DIGI 0x140
+#define BTN_TOOL_PEN 0x140
+#define BTN_TOOL_RUBBER 0x141
+#define BTN_TOOL_BRUSH 0x142
+#define BTN_TOOL_PENCIL 0x143
+#define BTN_TOOL_AIRBRUSH 0x144
+#define BTN_TOOL_FINGER 0x145
+#define BTN_TOOL_MOUSE 0x146
+#define BTN_TOOL_LENS 0x147
+#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */
+#define BTN_TOUCH 0x14a
+#define BTN_STYLUS 0x14b
+#define BTN_STYLUS2 0x14c
+#define BTN_TOOL_DOUBLETAP 0x14d
+#define BTN_TOOL_TRIPLETAP 0x14e
+#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */
+
+#define BTN_WHEEL 0x150
+#define BTN_GEAR_DOWN 0x150
+#define BTN_GEAR_UP 0x151
+
+#define KEY_OK 0x160
+#define KEY_SELECT 0x161
+#define KEY_GOTO 0x162
+#define KEY_CLEAR 0x163
+#define KEY_POWER2 0x164
+#define KEY_OPTION 0x165
+#define KEY_INFO 0x166 /* AL OEM Features/Tips/Tutorial */
+#define KEY_TIME 0x167
+#define KEY_VENDOR 0x168
+#define KEY_ARCHIVE 0x169
+#define KEY_PROGRAM 0x16a /* Media Select Program Guide */
+#define KEY_CHANNEL 0x16b
+#define KEY_FAVORITES 0x16c
+#define KEY_EPG 0x16d
+#define KEY_PVR 0x16e /* Media Select Home */
+#define KEY_MHP 0x16f
+#define KEY_LANGUAGE 0x170
+#define KEY_TITLE 0x171
+#define KEY_SUBTITLE 0x172
+#define KEY_ANGLE 0x173
+#define KEY_ZOOM 0x174
+#define KEY_MODE 0x175
+#define KEY_KEYBOARD 0x176
+#define KEY_SCREEN 0x177
+#define KEY_PC 0x178 /* Media Select Computer */
+#define KEY_TV 0x179 /* Media Select TV */
+#define KEY_TV2 0x17a /* Media Select Cable */
+#define KEY_VCR 0x17b /* Media Select VCR */
+#define KEY_VCR2 0x17c /* VCR Plus */
+#define KEY_SAT 0x17d /* Media Select Satellite */
+#define KEY_SAT2 0x17e
+#define KEY_CD 0x17f /* Media Select CD */
+#define KEY_TAPE 0x180 /* Media Select Tape */
+#define KEY_RADIO 0x181
+#define KEY_TUNER 0x182 /* Media Select Tuner */
+#define KEY_PLAYER 0x183
+#define KEY_TEXT 0x184
+#define KEY_DVD 0x185 /* Media Select DVD */
+#define KEY_AUX 0x186
+#define KEY_MP3 0x187
+#define KEY_AUDIO 0x188 /* AL Audio Browser */
+#define KEY_VIDEO 0x189 /* AL Movie Browser */
+#define KEY_DIRECTORY 0x18a
+#define KEY_LIST 0x18b
+#define KEY_MEMO 0x18c /* Media Select Messages */
+#define KEY_CALENDAR 0x18d
+#define KEY_RED 0x18e
+#define KEY_GREEN 0x18f
+#define KEY_YELLOW 0x190
+#define KEY_BLUE 0x191
+#define KEY_CHANNELUP 0x192 /* Channel Increment */
+#define KEY_CHANNELDOWN 0x193 /* Channel Decrement */
+#define KEY_FIRST 0x194
+#define KEY_LAST 0x195 /* Recall Last */
+#define KEY_AB 0x196
+#define KEY_NEXT 0x197
+#define KEY_RESTART 0x198
+#define KEY_SLOW 0x199
+#define KEY_SHUFFLE 0x19a
+#define KEY_BREAK 0x19b
+#define KEY_PREVIOUS 0x19c
+#define KEY_DIGITS 0x19d
+#define KEY_TEEN 0x19e
+#define KEY_TWEN 0x19f
+#define KEY_VIDEOPHONE 0x1a0 /* Media Select Video Phone */
+#define KEY_GAMES 0x1a1 /* Media Select Games */
+#define KEY_ZOOMIN 0x1a2 /* AC Zoom In */
+#define KEY_ZOOMOUT 0x1a3 /* AC Zoom Out */
+#define KEY_ZOOMRESET 0x1a4 /* AC Zoom */
+#define KEY_WORDPROCESSOR 0x1a5 /* AL Word Processor */
+#define KEY_EDITOR 0x1a6 /* AL Text Editor */
+#define KEY_SPREADSHEET 0x1a7 /* AL Spreadsheet */
+#define KEY_GRAPHICSEDITOR 0x1a8 /* AL Graphics Editor */
+#define KEY_PRESENTATION 0x1a9 /* AL Presentation App */
+#define KEY_DATABASE 0x1aa /* AL Database App */
+#define KEY_NEWS 0x1ab /* AL Newsreader */
+#define KEY_VOICEMAIL 0x1ac /* AL Voicemail */
+#define KEY_ADDRESSBOOK 0x1ad /* AL Contacts/Address Book */
+#define KEY_MESSENGER 0x1ae /* AL Instant Messaging */
+#define KEY_DISPLAYTOGGLE 0x1af /* Turn display (LCD) on and off */
+#define KEY_SPELLCHECK 0x1b0 /* AL Spell Check */
+#define KEY_LOGOFF 0x1b1 /* AL Logoff */
+
+#define KEY_DOLLAR 0x1b2
+#define KEY_EURO 0x1b3
+
+#define KEY_FRAMEBACK 0x1b4 /* Consumer - transport controls */
+#define KEY_FRAMEFORWARD 0x1b5
+#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
+#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
+#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
+#define KEY_IMAGES 0x1ba /* AL Image Browser */
+
+#define KEY_DEL_EOL 0x1c0
+#define KEY_DEL_EOS 0x1c1
+#define KEY_INS_LINE 0x1c2
+#define KEY_DEL_LINE 0x1c3
+
+#define KEY_FN 0x1d0
+#define KEY_FN_ESC 0x1d1
+#define KEY_FN_F1 0x1d2
+#define KEY_FN_F2 0x1d3
+#define KEY_FN_F3 0x1d4
+#define KEY_FN_F4 0x1d5
+#define KEY_FN_F5 0x1d6
+#define KEY_FN_F6 0x1d7
+#define KEY_FN_F7 0x1d8
+#define KEY_FN_F8 0x1d9
+#define KEY_FN_F9 0x1da
+#define KEY_FN_F10 0x1db
+#define KEY_FN_F11 0x1dc
+#define KEY_FN_F12 0x1dd
+#define KEY_FN_1 0x1de
+#define KEY_FN_2 0x1df
+#define KEY_FN_D 0x1e0
+#define KEY_FN_E 0x1e1
+#define KEY_FN_F 0x1e2
+#define KEY_FN_S 0x1e3
+#define KEY_FN_B 0x1e4
+
+#define KEY_BRL_DOT1 0x1f1
+#define KEY_BRL_DOT2 0x1f2
+#define KEY_BRL_DOT3 0x1f3
+#define KEY_BRL_DOT4 0x1f4
+#define KEY_BRL_DOT5 0x1f5
+#define KEY_BRL_DOT6 0x1f6
+#define KEY_BRL_DOT7 0x1f7
+#define KEY_BRL_DOT8 0x1f8
+#define KEY_BRL_DOT9 0x1f9
+#define KEY_BRL_DOT10 0x1fa
+
+#define KEY_NUMERIC_0 0x200 /* used by phones, remote controls, */
+#define KEY_NUMERIC_1 0x201 /* and other keypads */
+#define KEY_NUMERIC_2 0x202
+#define KEY_NUMERIC_3 0x203
+#define KEY_NUMERIC_4 0x204
+#define KEY_NUMERIC_5 0x205
+#define KEY_NUMERIC_6 0x206
+#define KEY_NUMERIC_7 0x207
+#define KEY_NUMERIC_8 0x208
+#define KEY_NUMERIC_9 0x209
+#define KEY_NUMERIC_STAR 0x20a
+#define KEY_NUMERIC_POUND 0x20b
+
+#define KEY_CAMERA_FOCUS 0x210
+#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */
+
+#define KEY_TOUCHPAD_TOGGLE 0x212 /* Request switch touchpad on or off */
+#define KEY_TOUCHPAD_ON 0x213
+#define KEY_TOUCHPAD_OFF 0x214
+
+#define KEY_CAMERA_ZOOMIN 0x215
+#define KEY_CAMERA_ZOOMOUT 0x216
+#define KEY_CAMERA_UP 0x217
+#define KEY_CAMERA_DOWN 0x218
+#define KEY_CAMERA_LEFT 0x219
+#define KEY_CAMERA_RIGHT 0x21a
+
+#define KEY_ATTENDANT_ON 0x21b
+#define KEY_ATTENDANT_OFF 0x21c
+#define KEY_ATTENDANT_TOGGLE 0x21d /* Attendant call on or off */
+#define KEY_LIGHTS_TOGGLE 0x21e /* Reading light on or off */
+
+#define BTN_DPAD_UP 0x220
+#define BTN_DPAD_DOWN 0x221
+#define BTN_DPAD_LEFT 0x222
+#define BTN_DPAD_RIGHT 0x223
+
+#define MATRIX_KEY(row, col, code) \
+ ((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))
+
+#endif /* _DT_BINDINGS_INPUT_INPUT_H */
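MATRIX_KEY() packs a matrix row, column and key code into the single-cell format consumed by matrix-keypad keymaps. A minimal C sketch of packing and unpacking one cell (row 2 and column 3 are arbitrary example values):

    #include <dt-bindings/input/input.h>

    /* Pack row 2, column 3 and KEY_ENTER into one keymap cell. */
    unsigned int cell = MATRIX_KEY(2, 3, KEY_ENTER);

    /* Unpack the same cell, mirroring the field layout of the macro. */
    unsigned int row  = (cell >> 24) & 0xFF;   /* 2 */
    unsigned int col  = (cell >> 16) & 0xFF;   /* 3 */
    unsigned int code = cell & 0xFFFF;         /* KEY_ENTER */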
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 3e7b62fbefb..91b84a7f053 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -87,6 +87,7 @@
#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
#define PL080_CONTROL_SB_SIZE_SHIFT (12)
#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
+#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0)
#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
#define PL080_BSIZE_1 (0x0)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c3881553f7d..5f66d519a72 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -243,6 +243,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_EXEC_MAP: Can be mapped for execution
*
* BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
+ *
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
@@ -254,6 +256,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_NO_ACCT_WB 0x00000080
#define BDI_CAP_SWAP_BACKED 0x00000100
#define BDI_CAP_STABLE_WRITES 0x00000200
+#define BDI_CAP_STRICTLIMIT 0x00000400
#define BDI_CAP_VMFLAGS \
(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70cf138690e..e8112ae5053 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -31,7 +31,7 @@ struct linux_binprm {
#ifdef __alpha__
unsigned int taso:1;
#endif
- unsigned int recursion_depth;
+ unsigned int recursion_depth; /* only for search_binary_handler() */
struct file * file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
new file mode 100644
index 00000000000..98e892ef6d5
--- /dev/null
+++ b/include/linux/cmdline-parser.h
@@ -0,0 +1,43 @@
+/*
+ * Parse the command line to get the partition information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#ifndef CMDLINEPARSEH
+#define CMDLINEPARSEH
+
+#include <linux/blkdev.h>
+
+/* partition flags */
+#define PF_RDONLY 0x01 /* Device is read only */
+#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
+
+struct cmdline_subpart {
+ char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+ sector_t from;
+ sector_t size;
+ int flags;
+ struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+ char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+ unsigned int nr_subparts;
+ struct cmdline_subpart *subpart;
+ struct cmdline_parts *next_parts;
+};
+
+void cmdline_parts_free(struct cmdline_parts **parts);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev);
+
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param);
+
+#endif /* CMDLINEPARSEH */
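A rough sketch of how these calls fit together; the command-line string and the add_part() body are hypothetical, modelled on the cmdline-partition documentation:

    #include <linux/cmdline-parser.h>

    static int add_part(int slot, struct cmdline_subpart *subpart, void *param)
    {
        /* register the partition described by subpart->from/subpart->size */
        return 0;
    }

    static int example_setup(sector_t disk_size)
    {
        struct cmdline_parts *parts, *p;
        int ret;

        ret = cmdline_parts_parse(&parts, "mmcblk0:1M(boot),-(rootfs)");
        if (ret)
            return ret;

        p = cmdline_parts_find(parts, "mmcblk0");
        if (p)
            cmdline_parts_set(p, disk_size, 1, add_part, NULL);

        cmdline_parts_free(&parts);
        return 0;
    }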
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ec1aee4aec9..345da00a86e 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -43,6 +43,7 @@
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
{ \
return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 37e4f8da7cd..fe68a5a9858 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -12,6 +12,15 @@
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+ unsigned long long *size);
+extern void __weak elfcorehdr_free(unsigned long long addr);
+extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e151d4c9298..653073de09e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -10,6 +10,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/math64.h>
#include <linux/ratelimit.h>
struct dm_dev;
@@ -550,6 +551,14 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+#define dm_sector_div64(x, y)( \
+{ \
+ u64 _res; \
+ (x) = div64_u64_rem(x, y, &_res); \
+ _res; \
+} \
+)
+
/*
* Ceiling(n / sz)
*/
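dm_sector_div64() divides its first argument in place (it becomes the quotient) and evaluates to the remainder, following the sector_div() convention. Illustrative values only:

    u64 nr = 10000;
    u64 rem;

    rem = dm_sector_div64(nr, 512);    /* now nr == 19, rem == 272 */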
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644
index 00000000000..2dc9b2bc18f
--- /dev/null
+++ b/include/linux/dma/mmp-pdma.h
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
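mmp_pdma_filter_fn() is meant to be handed to dma_request_channel() as the filter callback. A hedged sketch; treating param as a pointer to a requestor number is an assumption, since the exact meaning is driver-specific:

    #include <linux/dmaengine.h>
    #include <linux/dma/mmp-pdma.h>

    static struct dma_chan *example_get_chan(void)
    {
        dma_cap_mask_t mask;
        int requestor = 42;    /* hypothetical requestor line */

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, mmp_pdma_filter_fn, &requestor);
    }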
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0c72b89a172..0bc72753410 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -373,6 +373,25 @@ struct dma_slave_config {
unsigned int slave_id;
};
+/** struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of source address widths the channel supports
+ * @dstn_addr_widths: bit mask of destination address widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since enum dma_transfer_direction is not defined as a bit mask, the
+ * controller should fill (1 << <TYPE>) for each supported direction, and
+ * clients should test the mask the same way
+ * @cmd_pause: true if pause (and thereby resume) is supported
+ * @cmd_terminate: true if the terminate command is supported
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dstn_addr_widths;
+ u32 directions;
+ bool cmd_pause;
+ bool cmd_terminate;
+};
+
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
@@ -535,6 +554,7 @@ struct dma_tx_state {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
*/
struct dma_device {
@@ -600,6 +620,7 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
};
static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -673,6 +694,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+ if (!chan || !caps)
+ return -EINVAL;
+
+ /* check if the channel supports slave transactions */
+ if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+ return -ENXIO;
+
+ if (chan->device->device_slave_caps)
+ return chan->device->device_slave_caps(chan, caps);
+
+ return -ENXIO;
+}
+
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -1006,6 +1042,7 @@ static inline void dma_release_channel(struct dma_chan *chan)
int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
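A minimal sketch of querying the new capability interface before configuring a channel; which fields a given controller fills in is driver-specific:

    #include <linux/dmaengine.h>

    static int example_check_caps(struct dma_chan *chan)
    {
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(chan, &caps);
        if (ret)
            return ret;    /* not a slave channel, or caps not exposed */

        if (!(caps.directions & (1 << DMA_DEV_TO_MEM)))
            return -EINVAL;
        if (!caps.cmd_pause)
            pr_info("channel cannot be paused\n");
        return 0;
    }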
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index cf5d2af61b8..ff0b981f078 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,7 +9,6 @@
#define _LINUX_EVENTFD_H
#include <linux/fcntl.h>
-#include <linux/file.h>
#include <linux/wait.h>
/*
@@ -26,6 +25,8 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct file;
+
#ifdef CONFIG_EVENTFD
struct file *eventfd_file_create(unsigned int count, int flags);
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
deleted file mode 100644
index 55d87023839..00000000000
--- a/include/linux/fsl/mxs-dma.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_DMA_H__
-#define __MACH_MXS_DMA_H__
-
-#include <linux/dmaengine.h>
-
-struct mxs_dma_data {
- int chan_irq;
-};
-
-extern int mxs_dma_is_apbh(struct dma_chan *chan);
-extern int mxs_dma_is_apbx(struct dma_chan *chan);
-#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 661d374aeb2..f8d41cb1cbe 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -66,8 +66,8 @@ struct gen_pool_chunk {
struct list_head next_chunk; /* next chunk in pool */
atomic_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
- unsigned long start_addr; /* starting address of memory chunk */
- unsigned long end_addr; /* ending address of memory chunk */
+ unsigned long start_addr; /* start address of memory chunk */
+ unsigned long end_addr; /* end address of memory chunk (inclusive) */
unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c2b1801a160..0393270466c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,6 +66,9 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
+bool is_hugepage_active(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -134,6 +137,9 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
return 0;
}
+#define isolate_huge_page(p, l) false
+#define putback_active_hugepage(p) do {} while (0)
+#define is_hugepage_active(x) false
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
@@ -261,6 +267,8 @@ struct huge_bootmem_page {
};
struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -371,9 +379,23 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+int pmd_huge_support(void);
+/*
+ * Currently hugepage migration is enabled only for pmd-based hugepages.
+ * This function will be updated when hugepage migration is more widely
+ * supported.
+ */
+static inline int hugepage_migration_support(struct hstate *h)
+{
+ return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
@@ -396,6 +418,9 @@ static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
+#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define pmd_huge_support() 0
+#define hugepage_migration_support(h) 0
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* _LINUX_HUGETLB_H */
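A hedged sketch of how callers can gate on per-hstate migration support; the helper name is hypothetical:

    #include <linux/hugetlb.h>

    static bool can_migrate_hugepage(struct page *hpage)
    {
        return hugepage_migration_support(page_hstate(hpage));
    }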
diff --git a/include/linux/init.h b/include/linux/init.h
index e73f2b70852..f1c27a71d03 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -153,6 +153,7 @@ extern unsigned int reset_devices;
void setup_arch(char **);
void prepare_namespace(void);
void __init load_default_modules(void);
+int __init init_rootfs(void);
extern void (*late_time_init)(void);
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b0d5e..19c19a5eee2 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@ struct ipc_ids {
int in_use;
unsigned short seq;
unsigned short seq_max;
- struct rw_semaphore rw_mutex;
+ struct rw_semaphore rwsem;
struct idr ipcs_idr;
int next_id;
};
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ca1d27a0d6a..925eaf28fca 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -264,10 +264,36 @@ extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
-extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
+struct kprobe_insn_cache {
+ struct mutex mutex;
+ void *(*alloc)(void); /* allocate insn page */
+ void (*free)(void *); /* free insn page */
+ struct list_head pages; /* list of kprobe_insn_page */
+ size_t insn_size; /* size of instruction slot */
+ int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name) \
+extern struct kprobe_insn_cache kprobe_##__name##_slots; \
+ \
+static inline kprobe_opcode_t *get_##__name##_slot(void) \
+{ \
+ return __get_insn_slot(&kprobe_##__name##_slots); \
+} \
+ \
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{ \
+ __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
+} \
+
+DEFINE_INSN_CACHE_OPS(insn);
+
#ifdef CONFIG_OPTPROBES
/*
* Internal structure for direct jump optimized probe
@@ -287,13 +313,13 @@ extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
-extern kprobe_opcode_t *get_optinsn_slot(void);
-extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+DEFINE_INSN_CACHE_OPS(optinsn);
+
#ifdef CONFIG_SYSCTL
extern int sysctl_kprobes_optimization;
extern int proc_kprobes_optimization_handler(struct ctl_table *table,
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index d21c13f10a6..4356686b0a3 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -67,8 +67,8 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
* note : Destination buffer must be already allocated.
* slightly faster than lz4_decompress_unknownoutputsize()
*/
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
- size_t actual_dest_len);
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len);
/*
* lz4_decompress_unknownoutputsize()
@@ -82,6 +82,6 @@ int lz4_decompress(const char *src, size_t *src_len, char *dest,
* Error if return (< 0)
* note : Destination buffer must be already allocated.
*/
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
- char *dest, size_t *dest_len);
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+ unsigned char *dest, size_t *dest_len);
#endif
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 2913b86eb12..69ed5f5e9f6 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -31,6 +31,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
}
/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -63,6 +72,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif
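Illustrative use of the new helper; on 64-bit configurations the inline version shown above is used directly:

    u64 rem;
    u64 q = div64_u64_rem(1000003, 4096, &rem);    /* q == 244, rem == 579 */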
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f388203db7e..31e95acddb4 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -60,6 +60,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
+ unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39a588..da6716b9e3f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,7 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
}
#define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
static inline void mpol_get(struct mempolicy *pol)
{
@@ -126,6 +125,7 @@ struct shared_policy {
spinlock_t lock;
};
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
@@ -173,7 +173,7 @@ extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
- if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return 0;
/*
* Migration allocates pages in the highest zone. If we cannot
@@ -240,7 +240,12 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
}
#define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+ return 0;
+}
static inline void numa_policy_init(void)
{
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index ce3511326f8..b22883d6050 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -108,7 +108,6 @@ struct tmio_mmc_data {
unsigned int cd_gpio;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
- int (*get_cd)(struct platform_device *host);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
/* clock management callbacks */
int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc0f6..6fe52142063 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -41,8 +41,6 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
- unsigned long private, enum migrate_mode mode);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -62,9 +60,6 @@ static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, enum migrate_mode mode, int reason)
{ return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
- unsigned long private, enum migrate_mode mode)
- { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2d59b4149d..caf543c7eaa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -115,6 +115,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
+#else
+# define VM_SOFTDIRTY 0
+#endif
+
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
@@ -489,20 +495,6 @@ static inline int compound_order(struct page *page)
return (unsigned long)page[1].lru.prev;
}
-static inline int compound_trans_order(struct page *page)
-{
- int order;
- unsigned long flags;
-
- if (!PageHead(page))
- return 0;
-
- flags = compound_lock_irqsave(page);
- order = compound_order(page);
- compound_unlock_irqrestore(page, flags);
- return order;
-}
-
static inline void set_compound_order(struct page *page, unsigned long order)
{
page[1].lru.prev = (void *)order;
@@ -637,12 +629,12 @@ static inline enum zone_type page_zonenum(const struct page *page)
#endif
/*
- * The identification function is only used by the buddy allocator for
- * determining if two pages could be buddies. We are not really
- * identifying a zone since we could be using a the section number
- * id if we have not node id available in page flags.
- * We guarantee only that it will return the same value for two
- * combinable pages in a zone.
+ * The identification function is mainly used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really identifying
+ * the zone since we could be using the section number id if we do not have
+ * node id available in page flags.
+ * We only guarantee that it will return the same value for two combinable
+ * pages in a zone.
*/
static inline int page_zone_id(struct page *page)
{
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1397ccf81e9..cf55945c83f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,6 +2,7 @@
#define LINUX_MM_INLINE_H
#include <linux/huge_mm.h>
+#include <linux/swap.h>
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 443243b241d..da51bec578c 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -208,6 +208,8 @@ static inline void mmc_claim_host(struct mmc_host *host)
__mmc_claim_host(host, NULL);
}
+struct device_node;
extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index e3c6a74d980..3e781b8c0be 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -171,6 +171,7 @@ struct sdhci_host {
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
unsigned int ocr_avail_mmc;
+ u32 ocr_mask; /* available voltages */
wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index e7d5dd67bb7..ccd8fb2cad5 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/sh_dma.h>
/*
* MMCIF : CE_CLK_CTRL [19:16]
@@ -33,12 +32,12 @@
*/
struct sh_mmcif_plat_data {
- void (*set_pwr)(struct platform_device *pdev, int state);
- void (*down_pwr)(struct platform_device *pdev);
int (*get_cd)(struct platform_device *pdef);
unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
unsigned int slave_id_rx;
bool use_cd_gpio : 1;
+ bool ccs_unsupported : 1;
+ bool clk_ctrl2_present : 1;
unsigned int cd_gpio;
u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
unsigned long caps;
@@ -62,6 +61,7 @@ struct sh_mmcif_plat_data {
#define MMCIF_CE_INT_MASK 0x00000044
#define MMCIF_CE_HOST_STS1 0x00000048
#define MMCIF_CE_HOST_STS2 0x0000004C
+#define MMCIF_CE_CLK_CTRL2 0x00000070
#define MMCIF_CE_VERSION 0x0000007C
/* CE_BUF_ACC */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index b76bcf0621f..68927ae5084 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -25,8 +25,6 @@ struct sh_mobile_sdhi_info {
unsigned long tmio_caps2;
u32 tmio_ocr_mask; /* available MMC voltages */
unsigned int cd_gpio;
- void (*set_pwr)(struct platform_device *pdev, int state);
- int (*get_cd)(struct platform_device *pdev);
/* callbacks for board specific setup code */
int (*init)(struct platform_device *pdev,
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 7d88d27bfaf..b0c73e4cace 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -18,7 +18,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
void mmc_gpio_free_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio);
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce);
void mmc_gpio_free_cd(struct mmc_host *host);
#endif
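A minimal sketch of the updated card-detect request; whether the debounce argument is in microseconds is an assumption here, and passing 0 keeps the previous behaviour:

    #include <linux/mmc/slot-gpio.h>

    static int example_request_cd(struct mmc_host *host, unsigned int gpio)
    {
        return mmc_gpio_request_cd(host, gpio, 200);
    }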
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index af4a3b77a8d..bd791e452ad 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -105,6 +105,7 @@ struct zone_padding {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
+ NR_ALLOC_BATCH,
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
NR_ACTIVE_ANON, /* " " " " " */
@@ -352,7 +353,6 @@ struct zone {
* free areas of different sizes
*/
spinlock_t lock;
- int all_unreclaimable; /* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* Set to true when the PG_migrate_skip bits should be cleared */
bool compact_blockskip_flush;
diff --git a/include/linux/namei.h b/include/linux/namei.h
index cd09751c71a..8e47bc7a166 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -58,7 +58,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-extern int user_path_umountat(int, const char __user *, unsigned int, struct path *);
#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
@@ -71,8 +70,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
- const char *, unsigned int, struct path *);
+extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
diff --git a/include/linux/of.h b/include/linux/of.h
index 3a45c4f593a..f95aee391e3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -281,6 +281,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np,
extern int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
@@ -324,12 +327,6 @@ extern int of_detach_node(struct device_node *);
*/
const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
u32 *pu);
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- for (prop = of_find_property(np, propname, NULL), \
- p = of_prop_next_u32(prop, NULL, &u); \
- p; \
- p = of_prop_next_u32(prop, p, &u))
-
/*
* struct property *prop;
* const char *s;
@@ -338,11 +335,6 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
* printk("String value: %s\n", s);
*/
const char *of_prop_next_string(struct property *prop, const char *cur);
-#define of_property_for_each_string(np, propname, prop, s) \
- for (prop = of_find_property(np, propname, NULL), \
- s = of_prop_next_string(prop, NULL); \
- s; \
- s = of_prop_next_string(prop, s))
int of_device_is_stdout_path(struct device_node *dn);
@@ -497,6 +489,13 @@ static inline int of_parse_phandle_with_args(struct device_node *np,
return -ENOSYS;
}
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
static inline int of_count_phandle_with_args(struct device_node *np,
const char *list_name,
const char *cells_name)
@@ -519,12 +518,20 @@ static inline int of_device_is_stdout_path(struct device_node *dn)
return 0;
}
+static inline const __be32 *of_prop_next_u32(struct property *prop,
+ const __be32 *cur, u32 *pu)
+{
+ return NULL;
+}
+
+static inline const char *of_prop_next_string(struct property *prop,
+ const char *cur)
+{
+ return NULL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- while (0)
-#define of_property_for_each_string(np, propname, prop, s) \
- while (0)
#endif /* CONFIG_OF */
#ifndef of_node_to_nid
@@ -573,6 +580,18 @@ static inline int of_property_read_u32(const struct device_node *np,
return of_property_read_u32_array(np, propname, out_value, 1);
}
+#define of_property_for_each_u32(np, propname, prop, p, u) \
+ for (prop = of_find_property(np, propname, NULL), \
+ p = of_prop_next_u32(prop, NULL, &u); \
+ p; \
+ p = of_prop_next_u32(prop, p, &u))
+
+#define of_property_for_each_string(np, propname, prop, s) \
+ for (prop = of_find_property(np, propname, NULL), \
+ s = of_prop_next_string(prop, NULL); \
+ s; \
+ s = of_prop_next_string(prop, s))
+
#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
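With the iterators now defined outside the CONFIG_OF block, the same loop compiles in both configurations (it simply never iterates when OF is disabled). A sketch with a hypothetical property name:

    #include <linux/of.h>

    static void example_walk(struct device_node *np)
    {
        struct property *prop;
        const __be32 *p;
        u32 val;

        of_property_for_each_u32(np, "clock-frequency-table", prop, p, val)
            pr_info("entry: %u\n", val);
    }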
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 19f26f8d220..a478c62a2aa 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -109,8 +109,7 @@ extern u64 dt_mem_next_cell(int s, __be32 **cellp);
* physical addresses.
*/
#ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end);
+extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
#endif
/* Early flat tree scan hooks */
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 61bf53b0277..34597c8c1a4 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,10 +9,10 @@
#ifdef CONFIG_OF_NET
#include <linux/of.h>
-extern const int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
#else
-static inline const int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np)
{
return -ENODEV;
}
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
new file mode 100644
index 00000000000..648b8ea61a2
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-hpbdma.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_RCAR_HPBDMA_H
+#define __DMA_RCAR_HPBDMA_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Transmit sizes and respective register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_MAX
+};
+
+/* DMA control register (DCR) bits */
+#define HPB_DMAE_DCR_DTAMD (1u << 26)
+#define HPB_DMAE_DCR_DTAC (1u << 25)
+#define HPB_DMAE_DCR_DTAU (1u << 24)
+#define HPB_DMAE_DCR_DTAU1 (1u << 23)
+#define HPB_DMAE_DCR_SWMD (1u << 22)
+#define HPB_DMAE_DCR_BTMD (1u << 21)
+#define HPB_DMAE_DCR_PKMD (1u << 20)
+#define HPB_DMAE_DCR_CT (1u << 18)
+#define HPB_DMAE_DCR_ACMD (1u << 17)
+#define HPB_DMAE_DCR_DIP (1u << 16)
+#define HPB_DMAE_DCR_SMDL (1u << 13)
+#define HPB_DMAE_DCR_SPDAM (1u << 12)
+#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10)
+#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10)
+#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10)
+#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10)
+#define HPB_DMAE_DCR_SPDS_MASK (3u << 8)
+#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8)
+#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8)
+#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8)
+#define HPB_DMAE_DCR_DMDL (1u << 5)
+#define HPB_DMAE_DCR_DPDAM (1u << 4)
+#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2)
+#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2)
+#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2)
+#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2)
+#define HPB_DMAE_DCR_DPDS_MASK (3u << 0)
+#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0)
+#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0)
+#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0)
+
+/* Asynchronous reset register (ASYNCRSTR) bits */
+#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10)
+#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9)
+#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8)
+#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7)
+#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6)
+#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5)
+#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4)
+#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3)
+#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2)
+#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1)
+#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0)
+
+struct hpb_dmae_slave_config {
+ unsigned int id;
+ dma_addr_t addr;
+ u32 dcr;
+ u32 port;
+ u32 rstr;
+ u32 mdr;
+ u32 mdm;
+ u32 flags;
+#define HPB_DMAE_SET_ASYNC_RESET BIT(0)
+#define HPB_DMAE_SET_ASYNC_MODE BIT(1)
+ u32 dma_ch;
+};
+
+#define HPB_DMAE_CHANNEL(_irq, _s_id) \
+{ \
+ .ch_irq = _irq, \
+ .s_id = _s_id, \
+}
+
+struct hpb_dmae_channel {
+ unsigned int ch_irq;
+ unsigned int s_id;
+};
+
+struct hpb_dmae_pdata {
+ const struct hpb_dmae_slave_config *slaves;
+ int num_slaves;
+ const struct hpb_dmae_channel *channels;
+ int num_channels;
+ const unsigned int ts_shift[XMIT_SZ_MAX];
+ int num_hw_channels;
+};
+
+#endif
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 57300fd7cc0..179fb91bb5f 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -180,4 +180,6 @@ struct edma_soc_info {
const s16 (*xbar_chans)[2];
};
+int edma_trigger_channel(unsigned);
+
#endif
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 00000000000..9f0283721cb
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,16 @@
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+struct bq24190_platform_data {
+ unsigned int gpio_int; /* GPIO pin that's connected to INT# */
+};
+
+#endif
diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h
new file mode 100644
index 00000000000..23110dc7772
--- /dev/null
+++ b/include/linux/power/twl4030_madc_battery.h
@@ -0,0 +1,39 @@
+/*
+ * Dumb driver for Li-Ion batteries using the TWL4030 MADC.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TWL4030_MADC_BATTERY_H
+#define __TWL4030_MADC_BATTERY_H
+
+/*
+ * Usually we can assume 100% @ 4.15V and 0% @ 3.3V but curves differ for
+ * charging and discharging!
+ */
+
+struct twl4030_madc_bat_calibration {
+ short voltage; /* in mV - specify -1 for end of list */
+ short level; /* in percent (0 .. 100%) */
+};
+
+struct twl4030_madc_bat_platform_data {
+ unsigned int capacity; /* total capacity in uAh */
+ struct twl4030_madc_bat_calibration *charging;
+ int charging_size;
+ struct twl4030_madc_bat_calibration *discharging;
+ int discharging_size;
+};
+
+#endif
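A hedged board-code sketch; the voltages, levels and capacity are made up and must be calibrated per battery. Each list ends with voltage == -1:

    #include <linux/kernel.h>
    #include <linux/power/twl4030_madc_battery.h>

    static struct twl4030_madc_bat_calibration example_charging[] = {
        { .voltage = 4150, .level = 100 },
        { .voltage = 3950, .level = 75 },
        { .voltage = 3300, .level = 0 },
        { .voltage = -1 },                  /* end of list */
    };

    static struct twl4030_madc_bat_platform_data example_pdata = {
        .capacity = 1200000,                /* 1200 mAh, in uAh */
        .charging = example_charging,
        .charging_size = ARRAY_SIZE(example_charging),
        /* a real board would supply a discharging table as well */
    };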
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 804b90643a8..5c2600630dc 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,7 @@
#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/spinlock.h>
struct device;
@@ -194,6 +195,8 @@ struct power_supply {
/* private */
struct device *dev;
struct work_struct changed_work;
+ spinlock_t changed_lock;
+ bool changed;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0..403940787be 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,6 +231,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 0f424698064..73069cb6c54 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -101,6 +101,7 @@ extern const struct raid6_calls raid6_altivec8;
extern const struct raid6_calls raid6_avx2x1;
extern const struct raid6_calls raid6_avx2x2;
extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_tilegx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 69e37c2d1ea..753207c8ce2 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -25,7 +25,7 @@ extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_rootfs(void);
+extern int __init init_ramfs_fs(void);
int ramfs_fill_super(struct super_block *sb, void *data, int silent);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1bb1e2..aa870a4ddf5 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
@@ -81,4 +85,22 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root
+ * of given type, safe against removal of rb_node entry
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+ n = rb_entry(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); \
+ &pos->field; \
+ pos = n, \
+ n = rb_entry(rb_next_postorder(&pos->field), \
+ typeof(*pos), field))
+
#endif /* _LINUX_RBTREE_H */
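Post-order guarantees a node is visited after both of its children, so the typical use is tearing down a whole tree while freeing each entry. A sketch; struct item and its node member are hypothetical:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct item {
        struct rb_node node;
        int key;
    };

    static void example_destroy(struct rb_root *root)
    {
        struct item *pos, *n;

        rbtree_postorder_for_each_entry_safe(pos, n, root, node)
            kfree(pos);
        *root = RB_ROOT;
    }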
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce1e1c0aaa3..45f254dddaf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2169,15 +2169,15 @@ static inline bool thread_group_leader(struct task_struct *p)
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
{
- return p->pid == p->tgid;
+ return task_pid(p) == p->signal->leader_pid;
}
static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
- return p1->tgid == p2->tgid;
+ return p1->signal == p2->signal;
}
static inline struct task_struct *next_thread(const struct task_struct *p)
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 4e83f3e034f..b7b43b82231 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -33,13 +33,44 @@ struct sh_dmae_slave_config {
char mid_rid;
};
+/**
+ * struct sh_dmae_channel - DMAC channel platform data
+ * @offset: register offset within the main IOMEM resource
+ * @dmars: channel DMARS register offset
+ * @chclr_offset: channel CHCLR register offset
+ * @dmars_bit: channel DMARS field offset within the register
+ * @chclr_bit: bit position to set in order to reset the channel
+ */
struct sh_dmae_channel {
unsigned int offset;
unsigned int dmars;
- unsigned int dmars_bit;
unsigned int chclr_offset;
+ unsigned char dmars_bit;
+ unsigned char chclr_bit;
};
+/**
+ * struct sh_dmae_pdata - DMAC platform data
+ * @slave: array of slaves
+ * @slave_num: number of slaves in the above array
+ * @channel: array of DMA channels
+ * @channel_num: number of channels in the above array
+ * @ts_low_shift: shift of the low part of the TS field
+ * @ts_low_mask: low TS field mask
+ * @ts_high_shift: additional shift of the high part of the TS field
+ * @ts_high_mask: high TS field mask
+ * @ts_shift: array of Transfer Size shifts, indexed by TS value
+ * @ts_shift_num: number of shifts in the above array
+ * @dmaor_init: DMAOR initialisation value
+ * @chcr_offset: CHCR address offset
+ * @chcr_ie_bit: CHCR Interrupt Enable bit
+ * @dmaor_is_32bit: DMAOR is a 32-bit register
+ * @needs_tend_set: the TEND register has to be set
+ * @no_dmars: DMAC has no DMARS registers
+ * @chclr_present: DMAC has one or several CHCLR registers
+ * @chclr_bitwise: channel CHCLR registers are bitwise
+ * @slave_only: DMAC cannot be used for MEMCPY
+ */
struct sh_dmae_pdata {
const struct sh_dmae_slave_config *slave;
int slave_num;
@@ -59,42 +90,22 @@ struct sh_dmae_pdata {
unsigned int needs_tend_set:1;
unsigned int no_dmars:1;
unsigned int chclr_present:1;
+ unsigned int chclr_bitwise:1;
unsigned int slave_only:1;
};
-/* DMA register */
-#define SAR 0x00
-#define DAR 0x04
-#define TCR 0x08
-#define CHCR 0x0C
-#define DMAOR 0x40
-
-#define TEND 0x18 /* USB-DMAC */
-
/* DMAOR definitions */
#define DMAOR_AE 0x00000004
#define DMAOR_NMIF 0x00000002
#define DMAOR_DME 0x00000001
/* Definitions for the SuperH DMAC */
-#define REQ_L 0x00000000
-#define REQ_E 0x00080000
-#define RACK_H 0x00000000
-#define RACK_L 0x00040000
-#define ACK_R 0x00000000
-#define ACK_W 0x00020000
-#define ACK_H 0x00000000
-#define ACK_L 0x00010000
#define DM_INC 0x00004000
#define DM_DEC 0x00008000
#define DM_FIX 0x0000c000
#define SM_INC 0x00001000
#define SM_DEC 0x00002000
#define SM_FIX 0x00003000
-#define RS_IN 0x00000200
-#define RS_OUT 0x00000300
-#define TS_BLK 0x00000040
-#define TM_BUR 0x00000020
#define CHCR_DE 0x00000001
#define CHCR_TE 0x00000002
#define CHCR_IE 0x00000004
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 5b1c9848124..f92c0a43c54 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@ struct shdma_ops {
dma_addr_t (*slave_addr)(struct shdma_chan *);
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
- int (*set_slave)(struct shdma_chan *, int, bool);
+ int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
void (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
@@ -116,7 +116,6 @@ struct shdma_dev {
int shdma_request_irq(struct shdma_chan *, int,
unsigned long, const char *);
-void shdma_free_irq(struct shdma_chan *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c181399f2c2..cfb7ca094b3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -28,6 +28,27 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -95,27 +116,6 @@ static inline void call_function_init(void) { }
#endif
/*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
-
-/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
@@ -139,43 +139,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func, info, wait) \
- ({ \
- unsigned long __flags; \
- local_irq_save(__flags); \
- func(info); \
- local_irq_restore(__flags); \
- 0; \
- })
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
- do { \
- if (cpumask_test_cpu(0, (mask))) { \
- local_irq_disable(); \
- (func)(info); \
- local_irq_enable(); \
- } \
- } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
- do { \
- void *__info = (info); \
- preempt_disable(); \
- if ((cond_func)(0, __info)) { \
- local_irq_disable(); \
- (func)(__info); \
- local_irq_enable(); \
- } \
- preempt_enable(); \
- } while (0)
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
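Having a single set of declarations means callers no longer care whether the UP macros or the SMP implementations back them. A rough usage sketch (the callback and predicate names are invented for illustration):

static bool even_cpu_only(int cpu, void *info)
{
	return (cpu % 2) == 0;		/* illustrative predicate */
}

static void bump_counter(void *info)
{
	atomic_inc(info);		/* runs on each selected CPU */
}

static void example(void)
{
	static atomic_t hits;

	/* Every online CPU, waiting for all callbacks to finish. */
	on_each_cpu(bump_counter, &hits, 1);

	/* Only CPUs for which the predicate returns true; gfp_flags is
	 * used if a temporary cpumask has to be allocated. */
	on_each_cpu_cond(even_cpu_only, bump_counter, &hits, true,
			 GFP_KERNEL);
}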
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index 32be8dbdf19..274bc0fa00a 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -7,6 +7,11 @@
struct device;
struct mmc_host;
+#define MMC_SPI_USE_CD_GPIO (1 << 0)
+#define MMC_SPI_USE_RO_GPIO (1 << 1)
+#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2)
+#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3)
+
/* Put this in platform_data of a device being used to manage an MMC/SD
* card slot. (Modeled after PXA mmc glue; see that for usage examples.)
*
@@ -21,17 +26,19 @@ struct mmc_spi_platform_data {
void *);
void (*exit)(struct device *, void *);
- /* sense switch on sd cards */
- int (*get_ro)(struct device *);
-
/*
- * If board does not use CD interrupts, driver can optimize polling
- * using this function.
+ * Card Detect and Read Only GPIOs. To enable debouncing on the card
+ * detect GPIO, set cd_debounce to the debounce time in
+ * microseconds.
*/
- int (*get_cd)(struct device *);
+ unsigned int flags;
+ unsigned int cd_gpio;
+ unsigned int cd_debounce;
+ unsigned int ro_gpio;
/* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
unsigned long caps;
+ unsigned long caps2;
/* how long to debounce card detect, in msecs */
u16 detect_delay;
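A board that routes card-detect and write-protect to GPIOs would fill the new fields along these lines; the GPIO numbers and debounce value below are placeholders:

/* Hypothetical wiring -- GPIO numbers are placeholders. */
static struct mmc_spi_platform_data example_mmc_pdata = {
	.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO |
			  MMC_SPI_CD_GPIO_ACTIVE_LOW,
	.cd_gpio	= 100,		/* card-detect line */
	.cd_debounce	= 1000,		/* debounce time, in microseconds */
	.ro_gpio	= 101,		/* write-protect line */
	.caps		= MMC_CAP_NEEDS_POLL,
	.detect_delay	= 100,		/* msecs */
};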
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 6ce690de447..437ddb6c4ae 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -264,12 +264,30 @@ static inline int get_uint(char **bpp, unsigned int *anint)
return 0;
}
+static inline int get_time(char **bpp, time_t *time)
+{
+ char buf[50];
+ long long ll;
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtoll(buf, 0, &ll))
+ return -EINVAL;
+
+ *time = (time_t)ll;
+ return 0;
+}
+
static inline time_t get_expiry(char **bpp)
{
- int rv;
+ time_t rv;
struct timespec boot;
- if (get_int(bpp, &rv))
+ if (get_time(bpp, &rv))
return 0;
if (rv < 0)
return 0;
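Compared with the old get_int() path, get_time() parses through a long long first, so expiry stamps that do not fit an int survive wherever time_t is wider. A hedged sketch of a caller (the helper name is invented, not a real kernel symbol):

/* Illustrative caller -- example_parse_expiry() is hypothetical. */
static int example_parse_expiry(char **mesg, time_t *out)
{
	time_t when;
	int err = get_time(mesg, &when);

	if (err)	/* -EINVAL on malformed digits, -ENOENT if absent */
		return err;

	*out = when;
	return 0;
}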
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 1f0216b9a6c..6eecfc2e4f9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -243,7 +243,6 @@ struct svc_rqst {
struct xdr_buf rq_res;
struct page * rq_pages[RPCSVC_MAXPAGES];
struct page * *rq_respages; /* points into rq_pages */
- int rq_resused; /* number of pages used for result */
struct page * *rq_next_page; /* next reply page to use */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d95cde5e257..c03c139219c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,33 @@ enum {
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
/*
+ * We use this to track usage of a cluster. A cluster is a block of swap disk
+ * space, SWAPFILE_CLUSTER pages long, that is naturally aligned on disk. All
+ * free clusters are organized into a list, and we fetch an entry from the
+ * list to get a free cluster.
+ *
+ * The data field stores the next cluster if the cluster is free, or the
+ * cluster usage counter otherwise. The flags field determines whether a
+ * cluster is free. This is protected by swap_info_struct.lock.
+ */
+struct swap_cluster_info {
+ unsigned int data:24;
+ unsigned int flags:8;
+};
+#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+
+/*
+ * We assign a cluster to each CPU, so each CPU can allocate swap entries from
+ * its own cluster and swap out sequentially. The purpose is to optimize
+ * swapout throughput.
+ */
+struct percpu_cluster {
+ struct swap_cluster_info index; /* Current cluster index */
+ unsigned int next; /* Likely next allocation offset */
+};
+
+/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
@@ -191,14 +218,16 @@ struct swap_info_struct {
signed char next; /* next type on the swap list */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+ struct swap_cluster_info free_cluster_head; /* free cluster list head */
+ struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
unsigned int inuse_pages; /* number of those currently in use */
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
- unsigned int lowest_alloc; /* while preparing discard cluster */
- unsigned int highest_alloc; /* while preparing discard cluster */
+ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct swap_extent *curr_swap_extent;
struct swap_extent first_swap_extent;
struct block_device *bdev; /* swap device or bdev of swap file */
@@ -212,14 +241,18 @@ struct swap_info_struct {
* protect map scan related fields like
* swap_map, lowest_bit, highest_bit,
* inuse_pages, cluster_next,
- * cluster_nr, lowest_alloc and
- * highest_alloc. other fields are only
- * changed at swapon/swapoff, so are
- * protected by swap_lock. changing
- * flags need hold this lock and
- * swap_lock. If both locks need hold,
- * hold swap_lock first.
+ * cluster_nr, lowest_alloc,
+ * highest_alloc, free/discard cluster
+ * list. other fields are only changed
+ * at swapon/swapoff, so are protected
+ * by swap_lock. changing flags need
+ * hold this lock and swap_lock. If
+ * both locks need hold, hold swap_lock
+ * first.
*/
+ struct work_struct discard_work; /* discard worker */
+ struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
+ struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};
struct swap_list_t {
@@ -414,6 +447,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
#else /* CONFIG_SWAP */
+#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
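The 24/8 split keeps each swap_cluster_info to one 32-bit word, so the vmalloc'ed array stays small even for large swap devices. A standalone sketch of the encoding (compilable as ordinary userspace C on common ABIs):

#include <assert.h>
#include <stdio.h>

struct swap_cluster_info {
	unsigned int data:24;	/* next free cluster, or usage count */
	unsigned int flags:8;	/* CLUSTER_FLAG_* */
};
#define CLUSTER_FLAG_FREE	1

int main(void)
{
	struct swap_cluster_info ci = { .data = 42, .flags = CLUSTER_FLAG_FREE };

	assert(sizeof(ci) == 4);	/* one 32-bit word on common ABIs */
	if (ci.flags & CLUSTER_FLAG_FREE)
		printf("free cluster, next = %u\n", ci.data);
	return 0;
}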
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 84662ecc7b5..7fac04e7ff6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -186,6 +186,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
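The added line simply gives the SyS##name stub a previous prototype (useful against -Wmissing-prototypes style warnings) before the definition that follows. Hand-expanded and heavily simplified, SYSCALL_DEFINE1(close, unsigned int, fd) would now produce roughly:

asmlinkage long sys_close(unsigned int fd);	/* __SC_DECL */
static inline long SYSC_close(unsigned int fd);
asmlinkage long SyS_close(long fd);		/* the newly added prototype */
asmlinkage long SyS_close(long fd)		/* __SC_LONG widens every arg */
{
	long ret = SYSC_close((unsigned int)fd);	/* __SC_CAST narrows back */
	/* ... error checks elided ... */
	return ret;
}
static inline long SYSC_close(unsigned int fd)
{
	/* actual syscall body */
	return 0;
}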
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 2c02f3a8d2b..80cf8173a65 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,15 @@ struct pci_dev;
* out of the arbitration process (and can be safe to take
 * interrupts at any time).
*/
+#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes)
+{
+}
+#endif
/**
* vga_get - acquire & locks VGA resources
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index bd6cf61142b..1855f0a22ad 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -70,6 +70,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_SMP
+ NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
+ NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+#endif
+ NR_TLB_LOCAL_FLUSH_ALL,
+ NR_TLB_LOCAL_FLUSH_ONE,
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679b6fe..e4b948080d2 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -143,7 +143,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
#ifdef CONFIG_NUMA
/*
@@ -198,7 +197,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
-void refresh_cpu_vm_stats(int);
+void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +254,7 @@ static inline void __dec_zone_page_state(struct page *page,
static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
static inline void drain_zonestat(struct zone *zone,
struct per_cpu_pageset *pset) { }
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4e198ca1f68..021b8a319b9 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -98,8 +98,6 @@ int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
- enum wb_reason reason);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 4c7c01a7391..c38a005bd0c 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -26,6 +26,8 @@
#ifndef NET_9P_CLIENT_H
#define NET_9P_CLIENT_H
+#include <linux/utsname.h>
+
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -134,6 +136,7 @@ struct p9_req_t {
* @tagpool - transaction id accounting for session
* @reqs - 2D array of requests
* @max_tag - current maximum tag id allocated
+ * @name - node name used as client id
*
* The client structure is used to keep track of various per-client
* state that has been instantiated.
@@ -164,6 +167,8 @@ struct p9_client {
struct p9_idpool *tagpool;
struct p9_req_t *reqs[P9_ROW_MAXTAG];
int max_tag;
+
+ char name[__NEW_UTS_LEN + 1];
};
/**
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 3c4211f0bed..ea0cc26ab70 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -190,7 +190,9 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
}
extern int ndisc_init(void);
+extern int ndisc_late_init(void);
+extern void ndisc_late_cleanup(void);
extern void ndisc_cleanup(void);
extern int ndisc_rcv(struct sk_buff *skb);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6bc943ecb84..d0c61347662 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,13 @@ TRACE_EVENT(mm_page_alloc_extfrag,
TP_PROTO(struct page *page,
int alloc_order, int fallback_order,
- int alloc_migratetype, int fallback_migratetype),
+ int alloc_migratetype, int fallback_migratetype,
+ int change_ownership),
TP_ARGS(page,
alloc_order, fallback_order,
- alloc_migratetype, fallback_migratetype),
+ alloc_migratetype, fallback_migratetype,
+ change_ownership),
TP_STRUCT__entry(
__field( struct page *, page )
@@ -280,6 +282,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__field( int, fallback_order )
__field( int, alloc_migratetype )
__field( int, fallback_migratetype )
+ __field( int, change_ownership )
),
TP_fast_assign(
@@ -288,6 +291,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
__entry->fallback_migratetype = fallback_migratetype;
+ __entry->change_ownership = change_ownership;
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
@@ -299,7 +303,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->alloc_migratetype,
__entry->fallback_migratetype,
__entry->fallback_order < pageblock_order,
- __entry->alloc_migratetype == __entry->fallback_migratetype)
+ __entry->change_ownership)
);
#endif /* _TRACE_KMEM_H */
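With change_ownership passed in, the caller decides whether the fallback actually changed the pageblock's migratetype instead of the tracepoint inferring it from the two migratetype arguments. A call site would now look roughly like this (variable names are illustrative, not the exact mm/page_alloc.c hunk):

trace_mm_page_alloc_extfrag(page, alloc_order, fallback_order,
			    alloc_migratetype, fallback_migratetype,
			    change_ownership);	/* 1 if the pageblock changed type */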
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index afd0cbd52ed..f1e12bd40b3 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 25
+#define DM_VERSION_MINOR 26
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2013-06-26)"
+#define DM_VERSION_EXTRA "-ioctl (2013-08-15)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/init/Kconfig b/init/Kconfig
index bfa9e13c9a9..18bd9e3d327 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1670,6 +1670,7 @@ config BASE_SMALL
menuconfig MODULES
bool "Enable loadable module support"
+ option modules
help
Kernel modules are small pieces of compiled code which can
be inserted in the running kernel, rather than being
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 816014c4627..a51cddc2ff8 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -26,6 +26,8 @@
#include <linux/async.h>
#include <linux/fs_struct.h>
#include <linux/slab.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
@@ -588,3 +590,46 @@ out:
sys_mount(".", "/", NULL, MS_MOVE, NULL);
sys_chroot(".");
}
+
+static bool is_tmpfs;
+static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static unsigned long once;
+ void *fill = ramfs_fill_super;
+
+ if (test_and_set_bit(0, &once))
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
+ fill = shmem_fill_super;
+
+ return mount_nodev(fs_type, flags, data, fill);
+}
+
+static struct file_system_type rootfs_fs_type = {
+ .name = "rootfs",
+ .mount = rootfs_mount,
+ .kill_sb = kill_litter_super,
+};
+
+int __init init_rootfs(void)
+{
+ int err = register_filesystem(&rootfs_fs_type);
+
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
+ (!root_fs_names || strstr(root_fs_names, "tmpfs"))) {
+ err = shmem_init();
+ is_tmpfs = true;
+ } else {
+ err = init_ramfs_fs();
+ }
+
+ if (err)
+ unregister_filesystem(&rootfs_fs_type);
+
+ return err;
+}
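The tmpfs-or-ramfs decision is taken once, at boot, from the condition above; assuming CONFIG_TMPFS=y, it can be steered from the kernel command line:

# rootfs is tmpfs-backed: no root= given, rootfstype= unset or naming tmpfs
console=ttyS0

# keep the traditional ramfs-backed rootfs despite CONFIG_TMPFS=y
console=ttyS0 rootfstype=ramfs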
diff --git a/ipc/msg.c b/ipc/msg.c
index b65fdf1a09d..b0d541d4267 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -70,8 +70,6 @@ struct msg_sender {
#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
-#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
-
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
@@ -172,7 +170,7 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
* @ns: namespace
* @params: ptr to the structure that contains the key and msgflg
*
- * Called with msg_ids.rw_mutex held (writer)
+ * Called with msg_ids.rwsem held (writer)
*/
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
@@ -259,8 +257,8 @@ static void expunge_all(struct msg_queue *msq, int res)
* removes the message queue from message queue ID IDR, and cleans up all the
* messages associated with this queue.
*
- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
+ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.rwsem remains locked on exit.
*/
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
@@ -270,7 +268,8 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
expunge_all(msq, -EIDRM);
ss_wakeup(&msq->q_senders, 1);
msg_rmid(ns, msq);
- msg_unlock(msq);
+ ipc_unlock_object(&msq->q_perm);
+ rcu_read_unlock();
list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
atomic_dec(&ns->msg_hdrs);
@@ -282,7 +281,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
}
/*
- * Called with msg_ids.rw_mutex and ipcp locked.
+ * Called with msg_ids.rwsem and ipcp locked.
*/
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
@@ -386,9 +385,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
}
/*
- * This function handles some msgctl commands which require the rw_mutex
+ * This function handles some msgctl commands which require the rwsem
* to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
struct msqid_ds __user *buf, int version)
@@ -403,7 +402,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
return -EFAULT;
}
- down_write(&msg_ids(ns).rw_mutex);
+ down_write(&msg_ids(ns).rwsem);
rcu_read_lock();
ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
@@ -459,7 +458,7 @@ out_unlock0:
out_unlock1:
rcu_read_unlock();
out_up:
- up_write(&msg_ids(ns).rw_mutex);
+ up_write(&msg_ids(ns).rwsem);
return err;
}
@@ -494,7 +493,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
msginfo.msgmnb = ns->msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
- down_read(&msg_ids(ns).rw_mutex);
+ down_read(&msg_ids(ns).rwsem);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids(ns).in_use;
msginfo.msgmap = atomic_read(&ns->msg_hdrs);
@@ -505,7 +504,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
msginfo.msgtql = MSGTQL;
}
max_id = ipc_get_maxid(&msg_ids(ns));
- up_read(&msg_ids(ns).rw_mutex);
+ up_read(&msg_ids(ns).rwsem);
if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 4be6581d3b7..59451c1e214 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
int next_id;
int total, in_use;
- down_write(&ids->rw_mutex);
+ down_write(&ids->rwsem);
in_use = ids->in_use;
@@ -89,11 +89,12 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
perm = idr_find(&ids->ipcs_idr, next_id);
if (perm == NULL)
continue;
- ipc_lock_by_ptr(perm);
+ rcu_read_lock();
+ ipc_lock_object(perm);
free(ns, perm);
total++;
}
- up_write(&ids->rw_mutex);
+ up_write(&ids->rwsem);
}
static void free_ipc_ns(struct ipc_namespace *ns)
diff --git a/ipc/sem.c b/ipc/sem.c
index 41088899783..69b6a21f384 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -322,7 +322,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
}
/*
- * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * sem_lock_(check_) routines are called in the paths where the rwsem
* is not held.
*
* The caller holds the RCU read lock.
@@ -426,7 +426,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
* @ns: namespace
* @params: ptr to the structure that contains key, semflg and nsems
*
- * Called with sem_ids.rw_mutex held (as a writer)
+ * Called with sem_ids.rwsem held (as a writer)
*/
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
@@ -492,7 +492,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
/*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
*/
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
@@ -503,7 +503,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
}
/*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
*/
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
@@ -994,8 +994,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
return semzcnt;
}
-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
* remains locked on exit.
*/
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
@@ -1116,7 +1116,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
seminfo.semmnu = SEMMNU;
seminfo.semmap = SEMMAP;
seminfo.semume = SEMUME;
- down_read(&sem_ids(ns).rw_mutex);
+ down_read(&sem_ids(ns).rwsem);
if (cmd == SEM_INFO) {
seminfo.semusz = sem_ids(ns).in_use;
seminfo.semaem = ns->used_sems;
@@ -1125,7 +1125,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
seminfo.semaem = SEMAEM;
}
max_id = ipc_get_maxid(&sem_ids(ns));
- up_read(&sem_ids(ns).rw_mutex);
+ up_read(&sem_ids(ns).rwsem);
if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_id < 0) ? 0: max_id;
@@ -1431,9 +1431,9 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
}
/*
- * This function handles some semctl commands which require the rw_mutex
+ * This function handles some semctl commands which require the rwsem
* to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int semctl_down(struct ipc_namespace *ns, int semid,
int cmd, int version, void __user *p)
@@ -1448,7 +1448,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
return -EFAULT;
}
- down_write(&sem_ids(ns).rw_mutex);
+ down_write(&sem_ids(ns).rwsem);
rcu_read_lock();
ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
@@ -1487,7 +1487,7 @@ out_unlock0:
out_unlock1:
rcu_read_unlock();
out_up:
- up_write(&sem_ids(ns).rw_mutex);
+ up_write(&sem_ids(ns).rwsem);
return err;
}
diff --git a/ipc/shm.c b/ipc/shm.c
index c6b4ad5ce3b..2821cdf93ad 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -19,6 +19,9 @@
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
+ *
+ * Better ipc lock (kern_ipc_perm.lock) handling
+ * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
*/
#include <linux/slab.h>
@@ -80,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *ns)
}
/*
- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
- * Only shm_ids.rw_mutex remains locked on exit.
+ * Called with shm_ids.rwsem (writer) and the shp structure locked.
+ * Only shm_ids.rwsem remains locked on exit.
*/
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
@@ -124,8 +127,28 @@ void __init shm_init (void)
IPC_SHM_IDS, sysvipc_shm_proc_show);
}
+static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
+{
+ struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+
+ if (IS_ERR(ipcp))
+ return ERR_CAST(ipcp);
+
+ return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
+static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
+{
+ struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
+
+ if (IS_ERR(ipcp))
+ return ERR_CAST(ipcp);
+
+ return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
/*
- * shm_lock_(check_) routines are called in the paths where the rw_mutex
+ * shm_lock_(check_) routines are called in the paths where the rwsem
* is not necessarily held.
*/
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
@@ -144,17 +167,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
ipc_lock_object(&ipcp->shm_perm);
}
-static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return (struct shmid_kernel *)ipcp;
-
- return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -182,7 +194,7 @@ static void shm_open(struct vm_area_struct *vma)
* @ns: namespace
* @shp: struct to free
*
- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
+ * It has to be called with shp and shm_ids.rwsem (writer) locked,
* but returns with shp unlocked and freed.
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -230,7 +242,7 @@ static void shm_close(struct vm_area_struct *vma)
struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns;
- down_write(&shm_ids(ns).rw_mutex);
+ down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
BUG_ON(IS_ERR(shp));
@@ -241,10 +253,10 @@ static void shm_close(struct vm_area_struct *vma)
shm_destroy(ns, shp);
else
shm_unlock(shp);
- up_write(&shm_ids(ns).rw_mutex);
+ up_write(&shm_ids(ns).rwsem);
}
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
@@ -275,7 +287,7 @@ static int shm_try_destroy_current(int id, void *p, void *data)
return 0;
}
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
@@ -286,7 +298,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
* We want to destroy segments without users and with already
* exit'ed originating process.
*
- * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
+ * As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
if (shp->shm_creator != NULL)
return 0;
@@ -300,10 +312,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
- down_write(&shm_ids(ns).rw_mutex);
+ down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
- up_write(&shm_ids(ns).rw_mutex);
+ up_write(&shm_ids(ns).rwsem);
}
@@ -315,10 +327,10 @@ void exit_shm(struct task_struct *task)
return;
/* Destroy all already created segments, but not mapped yet */
- down_write(&shm_ids(ns).rw_mutex);
+ down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
- up_write(&shm_ids(ns).rw_mutex);
+ up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -452,7 +464,7 @@ static const struct vm_operations_struct shm_vm_ops = {
* @ns: namespace
* @params: ptr to the structure that contains key, size and shmflg
*
- * Called with shm_ids.rw_mutex held as a writer.
+ * Called with shm_ids.rwsem held as a writer.
*/
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
@@ -560,7 +572,7 @@ no_file:
}
/*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
@@ -571,7 +583,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
}
/*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
@@ -684,7 +696,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
/*
* Calculate and add used RSS and swap pages of a shm.
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
*/
static void shm_add_rss_swap(struct shmid_kernel *shp,
unsigned long *rss_add, unsigned long *swp_add)
@@ -711,7 +723,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
}
/*
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
*/
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
unsigned long *swp)
@@ -740,9 +752,9 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
}
/*
- * This function handles some shmctl commands which require the rw_mutex
+ * This function handles some shmctl commands which require the rwsem
* to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
struct shmid_ds __user *buf, int version)
@@ -757,14 +769,13 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
return -EFAULT;
}
- down_write(&shm_ids(ns).rw_mutex);
+ down_write(&shm_ids(ns).rwsem);
rcu_read_lock();
- ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
- &shmid64.shm_perm, 0);
+ ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
+ &shmid64.shm_perm, 0);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
- /* the ipc lock is not held upon failure */
goto out_unlock1;
}
@@ -772,14 +783,16 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
err = security_shm_shmctl(shp, cmd);
if (err)
- goto out_unlock0;
+ goto out_unlock1;
switch (cmd) {
case IPC_RMID:
+ ipc_lock_object(&shp->shm_perm);
/* do_shm_rmid unlocks the ipc object and rcu */
do_shm_rmid(ns, ipcp);
goto out_up;
case IPC_SET:
+ ipc_lock_object(&shp->shm_perm);
err = ipc_update_perm(&shmid64.shm_perm, ipcp);
if (err)
goto out_unlock0;
@@ -787,6 +800,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
break;
default:
err = -EINVAL;
+ goto out_unlock1;
}
out_unlock0:
@@ -794,33 +808,28 @@ out_unlock0:
out_unlock1:
rcu_read_unlock();
out_up:
- up_write(&shm_ids(ns).rw_mutex);
+ up_write(&shm_ids(ns).rwsem);
return err;
}
-SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
+ int cmd, int version, void __user *buf)
{
+ int err;
struct shmid_kernel *shp;
- int err, version;
- struct ipc_namespace *ns;
- if (cmd < 0 || shmid < 0) {
- err = -EINVAL;
- goto out;
+ /* preliminary security checks for *_INFO */
+ if (cmd == IPC_INFO || cmd == SHM_INFO) {
+ err = security_shm_shmctl(NULL, cmd);
+ if (err)
+ return err;
}
- version = ipc_parse_version(&cmd);
- ns = current->nsproxy->ipc_ns;
-
- switch (cmd) { /* replace with proc interface ? */
+ switch (cmd) {
case IPC_INFO:
{
struct shminfo64 shminfo;
- err = security_shm_shmctl(NULL, cmd);
- if (err)
- return err;
-
memset(&shminfo, 0, sizeof(shminfo));
shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
shminfo.shmmax = ns->shm_ctlmax;
@@ -830,9 +839,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
if(copy_shminfo_to_user (buf, &shminfo, version))
return -EFAULT;
- down_read(&shm_ids(ns).rw_mutex);
+ down_read(&shm_ids(ns).rwsem);
err = ipc_get_maxid(&shm_ids(ns));
- up_read(&shm_ids(ns).rw_mutex);
+ up_read(&shm_ids(ns).rwsem);
if(err<0)
err = 0;
@@ -842,19 +851,15 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
struct shm_info shm_info;
- err = security_shm_shmctl(NULL, cmd);
- if (err)
- return err;
-
memset(&shm_info, 0, sizeof(shm_info));
- down_read(&shm_ids(ns).rw_mutex);
+ down_read(&shm_ids(ns).rwsem);
shm_info.used_ids = shm_ids(ns).in_use;
shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
shm_info.shm_tot = ns->shm_tot;
shm_info.swap_attempts = 0;
shm_info.swap_successes = 0;
err = ipc_get_maxid(&shm_ids(ns));
- up_read(&shm_ids(ns).rw_mutex);
+ up_read(&shm_ids(ns).rwsem);
if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
err = -EFAULT;
goto out;
@@ -869,27 +874,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
struct shmid64_ds tbuf;
int result;
+ rcu_read_lock();
if (cmd == SHM_STAT) {
- shp = shm_lock(ns, shmid);
+ shp = shm_obtain_object(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
- goto out;
+ goto out_unlock;
}
result = shp->shm_perm.id;
} else {
- shp = shm_lock_check(ns, shmid);
+ shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
- goto out;
+ goto out_unlock;
}
result = 0;
}
+
err = -EACCES;
if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
goto out_unlock;
+
err = security_shm_shmctl(shp, cmd);
if (err)
goto out_unlock;
+
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
tbuf.shm_segsz = shp->shm_segsz;
@@ -899,43 +908,76 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
tbuf.shm_cpid = shp->shm_cprid;
tbuf.shm_lpid = shp->shm_lprid;
tbuf.shm_nattch = shp->shm_nattch;
- shm_unlock(shp);
- if(copy_shmid_to_user (buf, &tbuf, version))
+ rcu_read_unlock();
+
+ if (copy_shmid_to_user(buf, &tbuf, version))
err = -EFAULT;
else
err = result;
goto out;
}
+ default:
+ return -EINVAL;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ return err;
+}
+
+SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+{
+ struct shmid_kernel *shp;
+ int err, version;
+ struct ipc_namespace *ns;
+
+ if (cmd < 0 || shmid < 0)
+ return -EINVAL;
+
+ version = ipc_parse_version(&cmd);
+ ns = current->nsproxy->ipc_ns;
+
+ switch (cmd) {
+ case IPC_INFO:
+ case SHM_INFO:
+ case SHM_STAT:
+ case IPC_STAT:
+ return shmctl_nolock(ns, shmid, cmd, version, buf);
+ case IPC_RMID:
+ case IPC_SET:
+ return shmctl_down(ns, shmid, cmd, buf, version);
case SHM_LOCK:
case SHM_UNLOCK:
{
struct file *shm_file;
- shp = shm_lock_check(ns, shmid);
+ rcu_read_lock();
+ shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
- goto out;
+ goto out_unlock1;
}
audit_ipc_obj(&(shp->shm_perm));
+ err = security_shm_shmctl(shp, cmd);
+ if (err)
+ goto out_unlock1;
+ ipc_lock_object(&shp->shm_perm);
if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
kuid_t euid = current_euid();
err = -EPERM;
if (!uid_eq(euid, shp->shm_perm.uid) &&
!uid_eq(euid, shp->shm_perm.cuid))
- goto out_unlock;
+ goto out_unlock0;
if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
- goto out_unlock;
+ goto out_unlock0;
}
- err = security_shm_shmctl(shp, cmd);
- if (err)
- goto out_unlock;
-
shm_file = shp->shm_file;
if (is_file_hugepages(shm_file))
- goto out_unlock;
+ goto out_unlock0;
if (cmd == SHM_LOCK) {
struct user_struct *user = current_user();
@@ -944,32 +986,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
shp->shm_perm.mode |= SHM_LOCKED;
shp->mlock_user = user;
}
- goto out_unlock;
+ goto out_unlock0;
}
/* SHM_UNLOCK */
if (!(shp->shm_perm.mode & SHM_LOCKED))
- goto out_unlock;
+ goto out_unlock0;
shmem_lock(shm_file, 0, shp->mlock_user);
shp->shm_perm.mode &= ~SHM_LOCKED;
shp->mlock_user = NULL;
get_file(shm_file);
- shm_unlock(shp);
+ ipc_unlock_object(&shp->shm_perm);
+ rcu_read_unlock();
shmem_unlock_mapping(shm_file->f_mapping);
+
fput(shm_file);
- goto out;
- }
- case IPC_RMID:
- case IPC_SET:
- err = shmctl_down(ns, shmid, cmd, buf, version);
return err;
+ }
default:
return -EINVAL;
}
-out_unlock:
- shm_unlock(shp);
-out:
+out_unlock0:
+ ipc_unlock_object(&shp->shm_perm);
+out_unlock1:
+ rcu_read_unlock();
return err;
}
@@ -1037,10 +1078,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
* additional creator id...
*/
ns = current->nsproxy->ipc_ns;
- shp = shm_lock_check(ns, shmid);
+ rcu_read_lock();
+ shp = shm_obtain_object_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
- goto out;
+ goto out_unlock;
}
err = -EACCES;
@@ -1051,24 +1093,31 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
if (err)
goto out_unlock;
+ ipc_lock_object(&shp->shm_perm);
path = shp->shm_file->f_path;
path_get(&path);
shp->shm_nattch++;
size = i_size_read(path.dentry->d_inode);
- shm_unlock(shp);
+ ipc_unlock_object(&shp->shm_perm);
+ rcu_read_unlock();
err = -ENOMEM;
sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
- if (!sfd)
- goto out_put_dentry;
+ if (!sfd) {
+ path_put(&path);
+ goto out_nattch;
+ }
file = alloc_file(&path, f_mode,
is_file_hugepages(shp->shm_file) ?
&shm_file_operations_huge :
&shm_file_operations);
err = PTR_ERR(file);
- if (IS_ERR(file))
- goto out_free;
+ if (IS_ERR(file)) {
+ kfree(sfd);
+ path_put(&path);
+ goto out_nattch;
+ }
file->private_data = sfd;
file->f_mapping = shp->shm_file->f_mapping;
@@ -1094,7 +1143,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
addr > current->mm->start_stack - size - PAGE_SIZE * 5)
goto invalid;
}
-
+
addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
*raddr = addr;
err = 0;
@@ -1109,7 +1158,7 @@ out_fput:
fput(file);
out_nattch:
- down_write(&shm_ids(ns).rw_mutex);
+ down_write(&shm_ids(ns).rwsem);
shp = shm_lock(ns, shmid);
BUG_ON(IS_ERR(shp));
shp->shm_nattch--;
@@ -1117,20 +1166,13 @@ out_nattch:
shm_destroy(ns, shp);
else
shm_unlock(shp);
- up_write(&shm_ids(ns).rw_mutex);
-
-out:
+ up_write(&shm_ids(ns).rwsem);
return err;
out_unlock:
- shm_unlock(shp);
- goto out;
-
-out_free:
- kfree(sfd);
-out_put_dentry:
- path_put(&path);
- goto out_nattch;
+ rcu_read_unlock();
+out:
+ return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
@@ -1235,8 +1277,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
#else /* CONFIG_MMU */
/* under NOMMU conditions, the exact address to be destroyed must be
* given */
- retval = -EINVAL;
- if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
retval = 0;
}
diff --git a/ipc/util.c b/ipc/util.c
index 4704223bfad..e829da9ed01 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -15,6 +15,14 @@
 * Jun 2006 - namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
+ *
+ * General sysv ipc locking scheme:
+ * when doing ipc id lookups, take the ids->rwsem
+ * rcu_read_lock()
+ * obtain the ipc object (kern_ipc_perm)
+ * perform security, capabilities, auditing and permission checks, etc.
+ *   acquire the ipc lock (kern_ipc_perm.lock) through ipc_lock_object()
+ *   perform data updates (i.e. SET, RMID, LOCK/UNLOCK commands)
*/
#include <linux/mm.h>
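Distilled into one function, the scheme reads as below; the helpers are the real ones from this file, while example_ipc_set() itself is only an illustrative skeleton:

/* Illustrative skeleton of the lookup-then-lock sequence. */
static int example_ipc_set(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	down_write(&ns->ids[IPC_SEM_IDS].rwsem);
	rcu_read_lock();

	ipcp = ipc_obtain_object_check(&ns->ids[IPC_SEM_IDS], id);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_rcu;
	}

	/* security/permission checks run here, still without the ipc lock */

	ipc_lock_object(ipcp);		/* kern_ipc_perm.lock */
	/* ... the actual data update (SET, RMID, ...) ... */
	ipc_unlock_object(ipcp);
out_rcu:
	rcu_read_unlock();
	up_write(&ns->ids[IPC_SEM_IDS].rwsem);
	return err;
}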
@@ -119,7 +127,7 @@ __initcall(ipc_init);
void ipc_init_ids(struct ipc_ids *ids)
{
- init_rwsem(&ids->rw_mutex);
+ init_rwsem(&ids->rwsem);
ids->in_use = 0;
ids->seq = 0;
@@ -174,7 +182,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
* @ids: Identifier set
* @key: The key to find
*
- * Requires ipc_ids.rw_mutex locked.
+ * Requires ipc_ids.rwsem locked.
* Returns the LOCKED pointer to the ipc structure if found or NULL
* if not.
* If key is found ipc points to the owning ipc structure
@@ -197,7 +205,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
continue;
}
- ipc_lock_by_ptr(ipc);
+ rcu_read_lock();
+ ipc_lock_object(ipc);
return ipc;
}
@@ -208,7 +217,7 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
* ipc_get_maxid - get the last assigned id
* @ids: IPC identifier set
*
- * Called with ipc_ids.rw_mutex held.
+ * Called with ipc_ids.rwsem held.
*/
int ipc_get_maxid(struct ipc_ids *ids)
@@ -246,7 +255,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
* is returned. The 'new' entry is returned in a locked state on success.
* On failure the entry is not locked and a negative err-code is returned.
*
- * Called with writer ipc_ids.rw_mutex held.
+ * Called with writer ipc_ids.rwsem held.
*/
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
@@ -312,9 +321,9 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
{
int err;
- down_write(&ids->rw_mutex);
+ down_write(&ids->rwsem);
err = ops->getnew(ns, params);
- up_write(&ids->rw_mutex);
+ up_write(&ids->rwsem);
return err;
}
@@ -331,7 +340,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
*
* On success, the IPC id is returned.
*
- * It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ * It is called with ipc_ids.rwsem and ipcp->lock held.
*/
static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
@@ -376,7 +385,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
* Take the lock as a writer since we are potentially going to add
* a new entry + read locks are not "upgradable"
*/
- down_write(&ids->rw_mutex);
+ down_write(&ids->rwsem);
ipcp = ipc_findkey(ids, params->key);
if (ipcp == NULL) {
/* key not used */
@@ -402,7 +411,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
}
ipc_unlock(ipcp);
}
- up_write(&ids->rw_mutex);
+ up_write(&ids->rwsem);
return err;
}
@@ -413,7 +422,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
* @ids: IPC identifier set
* @ipcp: ipc perm structure containing the identifier to remove
*
- * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
+ * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit.
*/
@@ -621,7 +630,7 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
}
/**
- * ipc_lock - Lock an ipc structure without rw_mutex held
+ * ipc_lock - Lock an ipc structure without rwsem held
* @ids: IPC identifier set
* @id: ipc id to look for
*
@@ -677,22 +686,6 @@ out:
return out;
}
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
-{
- struct kern_ipc_perm *out;
-
- out = ipc_lock(ids, id);
- if (IS_ERR(out))
- return out;
-
- if (ipc_checkid(out, id)) {
- ipc_unlock(out);
- return ERR_PTR(-EIDRM);
- }
-
- return out;
-}
-
/**
* ipcget - Common sys_*get() code
 * @ns : namespace
@@ -733,7 +726,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
}
/**
- * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
* @ns: the ipc namespace
* @ids: the table of ids where to look for the ipc
* @id: the id of the ipc to retrieve
@@ -746,29 +739,13 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
* It must be called without any lock held and
* - retrieves the ipc with the given id in the given table.
* - performs some audit and permission check, depending on the given cmd
- * - returns the ipc with the ipc lock held in case of success
- * or an err-code without any lock held otherwise.
+ * - returns a pointer to the ipc object, or the corresponding error otherwise.
*
- * Call holding the both the rw_mutex and the rcu read lock.
+ * Call holding both the rwsem and the rcu read lock.
*/
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
- struct ipc_ids *ids, int id, int cmd,
- struct ipc64_perm *perm, int extra_perm)
-{
- struct kern_ipc_perm *ipcp;
-
- ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
- if (IS_ERR(ipcp))
- goto out;
-
- spin_lock(&ipcp->lock);
-out:
- return ipcp;
-}
-
struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
- struct ipc_ids *ids, int id, int cmd,
- struct ipc64_perm *perm, int extra_perm)
+ struct ipc_ids *ids, int id, int cmd,
+ struct ipc64_perm *perm, int extra_perm)
{
kuid_t euid;
int err = -EPERM;
@@ -846,7 +823,8 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
ipc = idr_find(&ids->ipcs_idr, pos);
if (ipc != NULL) {
*new_pos = pos + 1;
- ipc_lock_by_ptr(ipc);
+ rcu_read_lock();
+ ipc_lock_object(ipc);
return ipc;
}
}
@@ -884,7 +862,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
* Take the lock - this will be released by the corresponding
* call to stop().
*/
- down_read(&ids->rw_mutex);
+ down_read(&ids->rwsem);
/* pos < 0 is invalid */
if (*pos < 0)
@@ -911,7 +889,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
ids = &iter->ns->ids[iface->ids];
/* Release the lock we took in start() */
- up_read(&ids->rw_mutex);
+ up_read(&ids->rwsem);
}
static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index b6a6a88f300..c5f3338ba1f 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -94,10 +94,10 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
-/* must be called with ids->rw_mutex acquired for writing */
+/* must be called with ids->rwsem acquired for writing */
int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
-/* must be called with ids->rw_mutex acquired for reading */
+/* must be called with ids->rwsem acquired for reading */
int ipc_get_maxid(struct ipc_ids *);
/* must be called with both locks acquired. */
@@ -131,9 +131,6 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm);
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
- struct ipc_ids *ids, int id, int cmd,
- struct ipc64_perm *perm, int extra_perm);
#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/* On IA-64, we always use the "64-bit version" of the IPC structures. */
@@ -174,19 +171,12 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
assert_spin_locked(&perm->lock);
}
-static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
-{
- rcu_read_lock();
- ipc_lock_object(perm);
-}
-
static inline void ipc_unlock(struct kern_ipc_perm *perm)
{
ipc_unlock_object(perm);
rcu_read_unlock();
}
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e0aeb32415f..2418b6e71a8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -60,6 +60,7 @@
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>
+#include <linux/file.h>
#include <linux/atomic.h>
@@ -4034,8 +4035,8 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
struct cgroup_event *event;
struct cgroup_subsys_state *cfile_css;
unsigned int efd, cfd;
- struct file *efile;
- struct file *cfile;
+ struct fd efile;
+ struct fd cfile;
char *endp;
int ret;
@@ -4058,31 +4059,31 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
INIT_WORK(&event->remove, cgroup_event_remove);
- efile = eventfd_fget(efd);
- if (IS_ERR(efile)) {
- ret = PTR_ERR(efile);
+ efile = fdget(efd);
+ if (!efile.file) {
+ ret = -EBADF;
goto out_kfree;
}
- event->eventfd = eventfd_ctx_fileget(efile);
+ event->eventfd = eventfd_ctx_fileget(efile.file);
if (IS_ERR(event->eventfd)) {
ret = PTR_ERR(event->eventfd);
goto out_put_efile;
}
- cfile = fget(cfd);
- if (!cfile) {
+ cfile = fdget(cfd);
+ if (!cfile.file) {
ret = -EBADF;
goto out_put_eventfd;
}
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
- ret = inode_permission(file_inode(cfile), MAY_READ);
+ ret = inode_permission(file_inode(cfile.file), MAY_READ);
if (ret < 0)
goto out_put_cfile;
- event->cft = __file_cft(cfile);
+ event->cft = __file_cft(cfile.file);
if (IS_ERR(event->cft)) {
ret = PTR_ERR(event->cft);
goto out_put_cfile;
@@ -4103,7 +4104,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
ret = -EINVAL;
event->css = cgroup_css(cgrp, event->cft->ss);
- cfile_css = css_from_dir(cfile->f_dentry->d_parent, event->cft->ss);
+ cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss);
if (event->css && event->css == cfile_css && css_tryget(event->css))
ret = 0;
@@ -4121,25 +4122,25 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
if (ret)
goto out_put_css;
- efile->f_op->poll(efile, &event->pt);
+ efile.file->f_op->poll(efile.file, &event->pt);
spin_lock(&cgrp->event_list_lock);
list_add(&event->list, &cgrp->event_list);
spin_unlock(&cgrp->event_list_lock);
- fput(cfile);
- fput(efile);
+ fdput(cfile);
+ fdput(efile);
return 0;
out_put_css:
css_put(event->css);
out_put_cfile:
- fput(cfile);
+ fdput(cfile);
out_put_eventfd:
eventfd_ctx_put(event->eventfd);
out_put_efile:
- fput(efile);
+ fdput(efile);
out_kfree:
kfree(event);
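fdget() can skip the atomic reference bump that fget() always pays when the file table is not shared, and fdput() releases only what was actually taken. The conversion pattern, reduced to its core (the function itself is illustrative):

static int example_use_fd(unsigned int ufd)
{
	struct fd f = fdget(ufd);
	int ret = -EBADF;

	if (!f.file)
		return ret;		/* bad descriptor */

	/* ... operate on f.file where a struct file * was used before ... */
	ret = 0;

	fdput(f);	/* drops a reference only if fdget() took one */
	return ret;
}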
diff --git a/kernel/extable.c b/kernel/extable.c
index 67460b93b1a..832cb28105b 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@ u32 __initdata main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
- if (main_extable_sort_needed) {
+ if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
pr_notice("Sorting __ex_table...\n");
sort_extable(__start___ex_table, __stop___ex_table);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index c9eaf201300..81ccb4f010c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
- struct mempolicy *pol;
uprobe_start_dup_mmap();
down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
- pol = mpol_dup(vma_policy(mpnt));
- retval = PTR_ERR(pol);
- if (IS_ERR(pol))
+ retval = vma_dup_policy(mpnt, tmp);
+ if (retval)
goto fail_nomem_policy;
- vma_set_policy(tmp, pol);
tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@ out:
uprobe_end_dup_mmap();
return retval;
fail_nomem_anon_vma_fork:
- mpol_put(pol);
+ mpol_put(vma_policy(tmp));
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
@@ -1173,13 +1170,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
return ERR_PTR(-EINVAL);
/*
- * If the new process will be in a different pid namespace
- * don't allow the creation of threads.
+ * If the new process will be in a different pid or user namespace
+ * do not allow it to share a thread group or signal handlers or
+ * parent with the forking task.
*/
- if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
- (task_active_pid_ns(current) !=
- current->nsproxy->pid_ns_for_children))
- return ERR_PTR(-EINVAL);
+ if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+ if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+ (task_active_pid_ns(current) !=
+ current->nsproxy->pid_ns_for_children))
+ return ERR_PTR(-EINVAL);
+ }
retval = security_task_create(clone_flags);
if (retval)
@@ -1576,15 +1576,6 @@ long do_fork(unsigned long clone_flags,
long nr;
/*
- * Do some preliminary argument and permissions checking before we
- * actually start allocating stuff
- */
- if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
- if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
- return -EINVAL;
- }
-
- /*
* Determine whether and which event to report to ptracer. When
* called from kernel_thread or CLONE_UNTRACED is explicitly
* requested, no event is reported; otherwise, report if the event
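From userspace, the tightened check means a clone() that shares signal handlers while entering a new PID namespace now fails up front. A minimal illustration (expected to print "Invalid argument" on kernels with this change):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	static char stack[64 * 1024];

	/* CLONE_SIGHAND (which requires CLONE_VM) combined with
	 * CLONE_NEWPID is now rejected with EINVAL. */
	if (clone(child_fn, stack + sizeof(stack),
		  CLONE_NEWPID | CLONE_SIGHAND | CLONE_VM, NULL) == -1)
		perror("clone");	/* expected: Invalid argument */
	return 0;
}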
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f7b55ba74..2a74f307c5e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1474,11 +1474,8 @@ static int __init __parse_crashkernel(char *cmdline,
if (first_colon && (!first_space || first_colon < first_space))
return parse_crashkernel_mem(ck_cmdline, system_ram,
crash_size, crash_base);
- else
- return parse_crashkernel_simple(ck_cmdline, crash_size,
- crash_base);
- return 0;
+ return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498d665..a0d367a4912 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
struct kprobe_insn_page {
struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
+ struct kprobe_insn_cache *cache;
int nused;
int ngarbage;
char slot_used[];
@@ -121,12 +122,6 @@ struct kprobe_insn_page {
(offsetof(struct kprobe_insn_page, slot_used) + \
(sizeof(char) * (slots)))
-struct kprobe_insn_cache {
- struct list_head pages; /* list of kprobe_insn_page */
- size_t insn_size; /* size of instruction slot */
- int nr_garbage;
-};
-
static int slots_per_page(struct kprobe_insn_cache *c)
{
return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +133,20 @@ enum kprobe_slot_state {
SLOT_USED = 2,
};
-static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+static void *alloc_insn_page(void)
+{
+ return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+ module_free(NULL, page);
+}
+
+struct kprobe_insn_cache kprobe_insn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+ .alloc = alloc_insn_page,
+ .free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
@@ -150,10 +157,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
+ kprobe_opcode_t *slot = NULL;
+ mutex_lock(&c->mutex);
retry:
list_for_each_entry(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
@@ -162,7 +171,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
- return kip->insns + (i * c->insn_size);
+ slot = kip->insns + (i * c->insn_size);
+ goto out;
}
}
/* kip->nused is broken. Fix it. */
@@ -178,37 +188,29 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
/* All out of space. Need to allocate a new page. */
kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
- return NULL;
+ goto out;
/*
* Use module_alloc so this page is within +/- 2GB of where the
* kernel image and loaded module images reside. This is required
* so x86_64 can correctly handle the %rip-relative fixups.
*/
- kip->insns = module_alloc(PAGE_SIZE);
+ kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
- return NULL;
+ goto out;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
+ kip->cache = c;
list_add(&kip->list, &c->pages);
- return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
- kprobe_opcode_t *ret = NULL;
-
- mutex_lock(&kprobe_insn_mutex);
- ret = __get_insn_slot(&kprobe_insn_slots);
- mutex_unlock(&kprobe_insn_mutex);
-
- return ret;
+ slot = kip->insns;
+out:
+ mutex_unlock(&c->mutex);
+ return slot;
}
/* Return 1 if all garbage is collected, otherwise 0. */
@@ -225,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
*/
if (!list_is_singular(&kip->list)) {
list_del(&kip->list);
- module_free(NULL, kip->insns);
+ kip->cache->free(kip->insns);
kfree(kip);
}
return 1;
@@ -255,11 +257,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
return 0;
}
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
- kprobe_opcode_t *slot, int dirty)
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip;
+ mutex_lock(&c->mutex);
list_for_each_entry(kip, &c->pages, list) {
long idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +275,25 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
collect_garbage_slots(c);
} else
collect_one_slot(kip, idx);
- return;
+ goto out;
}
}
/* Could not free this slot. */
WARN_ON(1);
+out:
+ mutex_unlock(&c->mutex);
}
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
- mutex_lock(&kprobe_insn_mutex);
- __free_insn_slot(&kprobe_insn_slots, slot, dirty);
- mutex_unlock(&kprobe_insn_mutex);
-}
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+ .alloc = alloc_insn_page,
+ .free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
};
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
- kprobe_opcode_t *ret = NULL;
-
- mutex_lock(&kprobe_optinsn_mutex);
- ret = __get_insn_slot(&kprobe_optinsn_slots);
- mutex_unlock(&kprobe_optinsn_mutex);
-
- return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
- mutex_lock(&kprobe_optinsn_mutex);
- __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
- mutex_unlock(&kprobe_optinsn_mutex);
-}
#endif
#endif
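
With the mutex and the allocator callbacks folded into struct kprobe_insn_cache, each cache is self-locking and self-describing, which is what lets the get_insn_slot()/free_insn_slot() wrappers and their optprobe twins disappear. A hedged sketch of how a further cache could be declared against this interface (demo_* names are hypothetical; the allocation policy mirrors kprobe_insn_slots above):

static void *demo_alloc_insn_page(void)
{
	/* Same +/- 2GB placement policy as the default cache. */
	return module_alloc(PAGE_SIZE);
}

static void demo_free_insn_page(void *page)
{
	module_free(NULL, page);
}

struct kprobe_insn_cache demo_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(demo_insn_slots.mutex),
	.alloc = demo_alloc_insn_page,
	.free = demo_free_insn_page,
	.pages = LIST_HEAD_INIT(demo_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};

/* Callers then lock through the cache itself:
 *	kprobe_opcode_t *slot = __get_insn_slot(&demo_insn_slots);
 *	...
 *	__free_insn_slot(&demo_insn_slots, slot, 0);
 */
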
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 2b6e69909c3..7cbd4507a7e 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -18,14 +18,14 @@
struct key *modsign_keyring;
-extern __initdata const u8 modsign_certificate_list[];
-extern __initdata const u8 modsign_certificate_list_end[];
+extern __initconst const u8 modsign_certificate_list[];
+extern __initconst const u8 modsign_certificate_list_end[];
/*
* We need to make sure ccache doesn't cache the .o file as it doesn't notice
* if modsign.pub changes.
*/
-static __initdata const char annoy_ccache[] = __TIME__ "foo";
+static __initconst const char annoy_ccache[] = __TIME__ "foo";
/*
* Load the compiled-in keys
diff --git a/kernel/panic.c b/kernel/panic.c
index 80186460051..b6c482ccc5d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -123,10 +123,14 @@ void panic(const char *fmt, ...)
*/
smp_send_stop();
- kmsg_dump(KMSG_DUMP_PANIC);
-
+ /*
+ * Run any panic handlers, including those that might need to
+ * add information to the kmsg dump output.
+ */
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+ kmsg_dump(KMSG_DUMP_PANIC);
+
bust_spinlocks(0);
if (!panic_blink)
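
Because the notifier chain now runs before kmsg_dump(), anything a panic handler prints is captured in the dump. A hedged sketch of a handler that relies on the new ordering (demo_* names are hypothetical):

static int demo_panic_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* Runs before kmsg_dump(KMSG_DUMP_PANIC), so this line ends
	 * up in the dump output; 'data' is the panic message buffer. */
	pr_emerg("demo: panic reason: %s\n", (const char *)data);
	return NOTIFY_DONE;
}

static struct notifier_block demo_panic_nb = {
	.notifier_call = demo_panic_notify,
};

static int __init demo_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &demo_panic_nb);
	return 0;
}
late_initcall(demo_panic_init);
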
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 349587bb03e..358a146fd4d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -352,7 +352,7 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
struct mem_extent *ext, *cur, *aux;
zone_start = zone->zone_start_pfn;
- zone_end = zone->zone_start_pfn + zone->spanned_pages;
+ zone_end = zone_end_pfn(zone);
list_for_each_entry(ext, list, hook)
if (zone_start <= ext->end)
@@ -884,7 +884,7 @@ static unsigned int count_highmem_pages(void)
continue;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_highmem_page(zone, pfn))
n++;
@@ -948,7 +948,7 @@ static unsigned int count_data_pages(void)
continue;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_page(zone, pfn))
n++;
@@ -1041,7 +1041,7 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
unsigned long max_zone_pfn;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (page_is_saveable(zone, pfn))
memory_bm_set_bit(orig_bm, pfn);
@@ -1093,7 +1093,7 @@ void swsusp_free(void)
unsigned long pfn, max_zone_pfn;
for_each_populated_zone(zone) {
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
@@ -1755,7 +1755,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
/* Clear page flags */
for_each_populated_zone(zone) {
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
swsusp_unset_page_free(pfn_to_page(pfn));
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a146ee327f6..dd562e9aa2c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -236,7 +236,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
*/
int dumpable = 0;
/* Don't let security modules deny introspection */
- if (task == current)
+ if (same_thread_group(task, current))
return 0;
rcu_read_lock();
tcred = __task_cred(task);
diff --git a/kernel/signal.c b/kernel/signal.c
index 50e41075ac7..ded28b91fa5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3394,7 +3394,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
if (ret)
return -EFAULT;
sigset_from_compat(&new_ka.sa.sa_mask, &mask);
@@ -3406,7 +3406,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
&oact->sa_handler);
ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
&oact->sa_restorer);
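
The switch from __get_user()/__put_user() to get_user()/put_user() matters because the double-underscore forms skip the access_ok() range check and are only safe after an explicit check, which this compat path does not perform for the flags field. A rough illustration of the contract (ret and act as in the hunks above):

	/* Checked: verifies that the pointer lies in the user address
	 * range before the access; fails with -EFAULT otherwise. */
	ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);

	/* Unchecked: legal only after access_ok() has validated the
	 * whole structure, e.g. following a copy_from_user() that
	 * covered the same range. */
	ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
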
diff --git a/kernel/smp.c b/kernel/smp.c
index 449b707fc20..0564571dcdf 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -48,10 +48,13 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
cpu_to_node(cpu)))
return notifier_from_errno(-ENOMEM);
if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
- cpu_to_node(cpu)))
+ cpu_to_node(cpu))) {
+ free_cpumask_var(cfd->cpumask);
return notifier_from_errno(-ENOMEM);
+ }
cfd->csd = alloc_percpu(struct call_single_data);
if (!cfd->csd) {
+ free_cpumask_var(cfd->cpumask_ipi);
free_cpumask_var(cfd->cpumask);
return notifier_from_errno(-ENOMEM);
}
@@ -572,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
*
* If @wait is true, then returns once @func has returned.
*
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
*/
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait)
@@ -582,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(cpu, mask)) {
- local_irq_disable();
+ unsigned long flags;
+ local_irq_save(flags);
func(info);
- local_irq_enable();
+ local_irq_restore(flags);
}
put_cpu();
}
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd8065a3c..4b082b5cac9 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -34,6 +34,20 @@
#else
#define raw_read_can_lock(l) read_can_lock(l)
#define raw_write_can_lock(l) write_can_lock(l)
+
+/*
+ * Some architectures can relax in favour of the CPU owning the lock.
+ */
+#ifndef arch_read_relax
+# define arch_read_relax(l) cpu_relax()
+#endif
+#ifndef arch_write_relax
+# define arch_write_relax(l) cpu_relax()
+#endif
+#ifndef arch_spin_relax
+# define arch_spin_relax(l) cpu_relax()
+#endif
+
/*
* We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 07f6fc468e1..dc69093a8ec 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1225,7 +1225,7 @@ static struct ctl_table vm_table[] = {
.data = &hugepages_treat_as_movable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = hugetlb_treat_movable_handler,
+ .proc_handler = proc_dointvec,
},
{
.procname = "nr_overcommit_hugepages",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 65bd3c92d6f..8727032e3a6 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -4,6 +4,23 @@
static struct callback_head work_exited; /* all we need is ->next == NULL */
+/**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ * @notify: send the notification if true
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task returns from kernel
+ * mode or exits.
+ *
+ * This is like the signal handler which runs in kernel mode, but it doesn't
+ * try to wake up the @task.
+ *
+ * RETURNS:
+ * 0 on success or -ESRCH if the @task is exiting.
+ */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
@@ -21,11 +38,22 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
return 0;
}
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from the queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
struct callback_head **pprev = &task->task_works;
- struct callback_head *work = NULL;
+ struct callback_head *work;
unsigned long flags;
/*
* If cmpxchg() fails we continue without updating pprev.
@@ -35,7 +63,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
while ((work = ACCESS_ONCE(*pprev))) {
- read_barrier_depends();
+ smp_read_barrier_depends();
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
@@ -46,6 +74,14 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
return work;
}
+/**
+ * task_work_run - execute the work items added by task_work_add()
+ *
+ * Flush the pending work items. Should be used by core kernel code.
+ * Called before the task returns to user mode or stops, or when it
+ * exits. In the latter case task_work_add() can no longer queue new
+ * work after task_work_run() returns.
+ */
void task_work_run(void)
{
struct task_struct *task = current;
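
Taken together, the kerneldoc above describes a small queue-and-flush API. A hedged sketch of a typical producer, assuming a callback that frees its own container (demo_* names are hypothetical):

struct demo_work {
	struct callback_head cb;
	int payload;
};

static void demo_func(struct callback_head *cb)
{
	struct demo_work *dw = container_of(cb, struct demo_work, cb);

	/* Runs in the target task's context, from task_work_run(). */
	pr_info("demo: ran with payload %d\n", dw->payload);
	kfree(dw);
}

static int demo_queue(struct task_struct *task, int payload)
{
	struct demo_work *dw = kmalloc(sizeof(*dw), GFP_KERNEL);

	if (!dw)
		return -ENOMEM;
	dw->payload = payload;
	init_task_work(&dw->cb, demo_func);
	if (task_work_add(task, &dw->cb, true)) {
		kfree(dw);		/* task is exiting: -ESRCH */
		return -ESRCH;
	}
	return 0;
}
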
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e9faf..630d72bf7e4 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -10,12 +10,64 @@
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
{
+ unsigned long flags;
+
WARN_ON(cpu != 0);
- local_irq_disable();
- (func)(info);
- local_irq_enable();
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
+
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
+/*
+ * Note that we still need to test the mask even on UP,
+ * because code that on SMP might call us without the
+ * local CPU in the mask can hand us an empty mask here.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ unsigned long flags;
+
+ if (cpumask_test_cpu(0, mask)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ if (cond_func(0, info)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
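
On UP the conditional variant still evaluates cond_func with preemption disabled, matching SMP, so predicates that sample per-cpu state see a stable view. A hedged usage sketch (demo_* names are hypothetical):

static bool demo_cond(int cpu, void *info)
{
	/* Evaluated with preemption disabled on UP and SMP alike. */
	return *(int *)info != 0;
}

static void demo_func(void *info)
{
	pr_info("demo: running on cpu %d\n", smp_processor_id());
}

/* From sleepable context:
 *	on_each_cpu_cond(demo_cond, demo_func, &flag, true, GFP_KERNEL);
 */
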
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 652bea9054f..c9eef36739a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1461,7 +1461,7 @@ config BACKTRACE_SELF_TEST
config RBTREE_TEST
tristate "Red-Black tree test"
- depends on m && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
A benchmark measuring the performance of the rbtree library.
Also includes rbtree invariant checks.
diff --git a/lib/crc32.c b/lib/crc32.c
index 072fbd8234d..410093dbe51 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -131,11 +131,14 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
#endif
/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
+ * CRC32/CRC32C
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
+ * uses, or the previous crc32/crc32c value if computing incrementally.
+ * @p: pointer to buffer over which CRC32/CRC32C is run
* @len: length of buffer @p
+ * @tab: little-endian Ethernet table
+ * @polynomial: CRC32/CRC32C LE polynomial
*/
static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
size_t len, const u32 (*tab)[256],
@@ -201,11 +204,13 @@ EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
* other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC is run
+ * @p: pointer to buffer over which CRC32 is run
* @len: length of buffer @p
+ * @tab: big-endian Ethernet table
+ * @polynomial: CRC32 BE polynomial
*/
static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
size_t len, const u32 (*tab)[256],
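
As the kerneldoc notes, @crc may be the previous value when computing incrementally, so chaining calls over fragments matches one call over the whole buffer. A hedged sketch (buf and len are assumed inputs):

	u32 whole, split;

	whole = crc32_le(~0, buf, len);

	split = crc32_le(~0, buf, len / 2);
	split = crc32_le(split, buf + len / 2, len - len / 2);

	WARN_ON(whole != split);	/* the two must agree */
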
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e34ee..d619b28c456 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = 0x7fffffff; /* no limit */
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
diff --git a/lib/div64.c b/lib/div64.c
index a163b6caef7..4382ad77777 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,6 +79,46 @@ EXPORT_SYMBOL(div_s64_rem);
#endif
/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ * @remainder: 64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64,
+ * but this operation, which also computes the remainder, is kept
+ * distinct to avoid slowing down the div64_u64 path on 32-bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+ u32 high = divisor >> 32;
+ u64 quot;
+
+ if (high == 0) {
+ u32 rem32;
+ quot = div_u64_rem(dividend, divisor, &rem32);
+ *remainder = rem32;
+ } else {
+ int n = 1 + fls(high);
+ quot = div_u64(dividend >> n, divisor >> n);
+
+ if (quot != 0)
+ quot--;
+
+ *remainder = dividend - quot * divisor;
+ if (*remainder >= divisor) {
+ quot++;
+ *remainder -= divisor;
+ }
+ }
+
+ return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
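
The correction step above works because shifting both operands right by n = 1 + fls(high) leaves a divisor that fits in 32 bits while keeping the approximate quotient within one of the true value. A hedged userspace re-derivation that checks the claim against native 64-bit division (any C99 compiler; __builtin_clz stands in for fls):

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_div64_u64_rem(uint64_t dividend, uint64_t divisor,
				   uint64_t *remainder)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;
		*remainder = dividend % divisor;
	} else {
		int n = 33 - __builtin_clz(high);	/* 1 + fls(high) */

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;		/* estimate may be one too large */
		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {	/* at most one step off */
			quot++;
			*remainder -= divisor;
		}
	}
	return quot;
}

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = 0x100000001ULL, r;
	uint64_t q = demo_div64_u64_rem(a, b, &r);

	printf("q=%llu r=%llu\n", (unsigned long long)q,
	       (unsigned long long)r);
	return q != a / b || r != a % b;	/* 0 when both match */
}
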
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9bc3d..26cf20be72b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+ return chunk->end_addr - chunk->start_addr + 1;
+}
+
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
unsigned long val, nval;
@@ -182,13 +187,13 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
int nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
- chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+ chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
if (unlikely(chunk == NULL))
return -ENOMEM;
chunk->phys_addr = phys;
chunk->start_addr = virt;
- chunk->end_addr = virt + size;
+ chunk->end_addr = virt + size - 1;
atomic_set(&chunk->avail, size);
spin_lock(&pool->lock);
@@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
paddr = chunk->phys_addr + (addr - chunk->start_addr);
break;
}
@@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
@@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
if (size > atomic_read(&chunk->avail))
continue;
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
retry:
start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
pool->data);
@@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
- BUG_ON(addr + size > chunk->end_addr);
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+ BUG_ON(addr + size - 1 > chunk->end_addr);
start_bit = (addr - chunk->start_addr) >> order;
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
BUG_ON(remain);
@@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
- size += chunk->end_addr - chunk->start_addr;
+ size += chunk_size(chunk);
rcu_read_unlock();
return size;
}
@@ -519,7 +524,6 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
/**
* dev_get_gen_pool - Obtain the gen_pool (if any) for a device
* @dev: device to retrieve the gen_pool from
- * @name: Optional name for the gen_pool, usually NULL
*
* Returns the gen_pool for the device if one is present, or NULL.
*/
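
Changing end_addr from exclusive to inclusive is what allows a chunk to end at the very top of the address space without wrapping to zero. A hedged sketch of the arithmetic (32-bit values chosen to show the edge case; addr is an arbitrary probe address):

	unsigned long virt = 0xfffff000UL, size = 0x1000UL;
	unsigned long end_excl = virt + size;		/* wraps to 0 on 32-bit */
	unsigned long end_incl = virt + size - 1;	/* 0xffffffff, valid   */

	/* Containment and size tests against the inclusive bound: */
	bool contains = (addr >= virt && addr <= end_incl);
	size_t sz = end_incl - virt + 1;		/* == size */
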
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 411be80ddb4..df6839e3ce0 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -283,8 +283,8 @@ _output_error:
return (int) (-(((char *) ip) - source));
}
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
- size_t actual_dest_len)
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len)
{
int ret = -1;
int input_len = 0;
@@ -302,8 +302,8 @@ exit_0:
EXPORT_SYMBOL(lz4_decompress);
#endif
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
- char *dest, size_t *dest_len)
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+ unsigned char *dest, size_t *dest_len)
{
int ret = -1;
int out_len = 0;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7964296fd5..7811ed3b4e7 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
+#include <linux/hardirq.h> /* in_interrupt() */
#ifdef __KERNEL__
@@ -207,7 +208,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
- if (!(gfp_mask & __GFP_WAIT)) {
+ /*
+	 * The preload code isn't irq safe, and it doesn't make sense to use
+	 * preloading in interrupt context anyway, as all the allocations there
+	 * have to be atomic. So just do a normal allocation when in interrupt.
+ */
+ if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {
struct radix_tree_preload *rtp;
/*
@@ -264,7 +270,7 @@ radix_tree_node_free(struct radix_tree_node *node)
* To make use of this facility, the radix tree must be initialised without
* __GFP_WAIT being passed to INIT_RADIX_TREE().
*/
-int radix_tree_preload(gfp_t gfp_mask)
+static int __radix_tree_preload(gfp_t gfp_mask)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
@@ -288,9 +294,40 @@ int radix_tree_preload(gfp_t gfp_mask)
out:
return ret;
}
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+ * success, return zero, with preemption disabled. On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+	/* Warn on nonsensical use... */
+ WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT));
+ return __radix_tree_preload(gfp_mask);
+}
EXPORT_SYMBOL(radix_tree_preload);
/*
+ * The same as the function above, except that preloading is not
+ * guaranteed; it is done only when it would help. On success, return
+ * zero with preemption disabled. On error, return -ENOMEM with
+ * preemption not disabled.
+ */
+int radix_tree_maybe_preload(gfp_t gfp_mask)
+{
+ if (gfp_mask & __GFP_WAIT)
+ return __radix_tree_preload(gfp_mask);
+ /* Preloading doesn't help anything with this gfp mask, skip it */
+ preempt_disable();
+ return 0;
+}
+EXPORT_SYMBOL(radix_tree_maybe_preload);
+
+/*
 * Return the maximum key which can be stored into a
* radix tree with height HEIGHT.
*/
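
radix_tree_maybe_preload() lets a caller keep one insertion path for both sleepable and atomic gfp masks: it preloads when __GFP_WAIT permits sleeping and otherwise just disables preemption, so radix_tree_preload_end() pairs with it either way. A hedged sketch of the calling convention, in the same shape the filemap.c hunk below adopts (demo_lock is an assumed spinlock guarding the tree):

static int demo_insert(struct radix_tree_root *root, unsigned long index,
		       void *item, gfp_t gfp)
{
	int err;

	err = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (err)
		return err;	/* -ENOMEM, only when preloading was tried */

	spin_lock(&demo_lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(&demo_lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return err;
}
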
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b4625787c7e..c7dab064555 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -6,6 +6,7 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
+raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
hostprogs-y += mktables
@@ -110,6 +111,11 @@ $(obj)/neon8.c: UNROLL := 8
$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
+targets += tilegx8.c
+$(obj)/tilegx8.c: UNROLL := 8
+$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
quiet_cmd_mktable = TABLE $@
cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 74e6f5629db..f0b1aa3586d 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -66,6 +66,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_altivec4,
&raid6_altivec8,
#endif
+#if defined(CONFIG_TILEGX)
+ &raid6_tilegx8,
+#endif
&raid6_intx1,
&raid6_intx2,
&raid6_intx4,
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 28afa1a06e0..29090f3db67 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -40,13 +40,16 @@ else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
- HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
+ HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
gcc -c -x c - >&/dev/null && \
rm ./-.o && echo yes)
ifeq ($(HAS_ALTIVEC),yes)
OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
endif
endif
+ifeq ($(ARCH),tilegx)
+OBJS += tilegx8.o
+endif
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
@@ -109,11 +112,15 @@ int16.c: int.uc ../unroll.awk
int32.c: int.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=32 < int.uc > $@
+tilegx8.c: tilegx.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
+
tables.c: mktables
./mktables > tables.c
clean:
rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
+ rm -f tilegx*.c
spotless: clean
rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
new file mode 100644
index 00000000000..e7c29459cbc
--- /dev/null
+++ b/lib/raid6/tilegx.uc
@@ -0,0 +1,86 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Copyright 2012 Tilera Corporation - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * tilegx$#.c
+ *
+ * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
+ *
+ * This file is postprocessed using unroll.awk.
+ *
+ */
+
+#include <linux/raid/pq.h>
+
+/* Create 8 byte copies of constant byte */
+# define NBYTES(x) (__insn_v1addi(0, x))
+# define NSIZE 8
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline __attribute_const__ u64 SHLBYTE(u64 v)
+{
+ /* Vector One Byte Shift Left Immediate. */
+ return __insn_v1shli(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline __attribute_const__ u64 MASK(u64 v)
+{
+ /* Vector One Byte Shift Right Signed Immediate. */
+ return __insn_v1shrsi(v, 7);
+}
+
+
+void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u64 *p, *q;
+ int d, z, z0;
+
+ u64 wd$$, wq$$, wp$$, w1$$, w2$$;
+ u64 x1d = NBYTES(0x1d);
+ u64 * z0ptr;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = (u64 *)dptr[z0+1]; /* XOR parity */
+ q = (u64 *)dptr[z0+2]; /* RS syndrome */
+
+ z0ptr = (u64 *)&dptr[z0][0];
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = *z0ptr++;
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
+ wp$$ = wp$$ ^ wd$$;
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ = w2$$ & x1d;
+ w1$$ = w1$$ ^ w2$$;
+ wq$$ = w1$$ ^ wd$$;
+ }
+ *p++ = wp$$;
+ *q++ = wq$$;
+ }
+}
+
+const struct raid6_calls raid6_tilegx$# = {
+ raid6_tilegx$#_gen_syndrome,
+ NULL,
+ "tilegx$#",
+ 0
+};
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c0e31fe2fab..65f4effd117 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -518,3 +518,43 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+ for (;;) {
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+ else
+ return (struct rb_node *)node;
+ }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+ const struct rb_node *parent;
+ if (!node)
+ return NULL;
+ parent = rb_parent(node);
+
+ /* If we're sitting on node, we've already seen our children */
+ if (parent && node == parent->rb_left && parent->rb_right) {
+ /* If we are the parent's left node, go to the parent's right
+ * node then all the way down to the left */
+ return rb_left_deepest_node(parent->rb_right);
+ } else
+ /* Otherwise we are the parent's right node, and the parent
+ * should be next */
+ return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+ if (!root->rb_node)
+ return NULL;
+
+ return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
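
Postorder visits children before their parent, so the new iterator makes it safe to tear down a tree while walking it. A hedged sketch, assuming an entry type embedding an rb_node (demo_* names are hypothetical):

struct demo_node {
	struct rb_node rb;
	int key;
};

static void demo_destroy(struct rb_root *root)
{
	struct rb_node *node = rb_first_postorder(root);

	while (node) {
		struct rb_node *next = rb_next_postorder(node);

		/* Children were already visited, so freeing here
		 * cannot invalidate the rest of the walk. */
		kfree(rb_entry(node, struct demo_node, rb));
		node = next;
	}
	*root = RB_ROOT;
}
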
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 122f02f9941..31dd4ccd3ba 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb)
return count;
}
+static void check_postorder(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0;
+ for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
+ count++;
+
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
static void check(int nr_nodes)
{
struct rb_node *rb;
@@ -136,6 +146,8 @@ static void check(int nr_nodes)
WARN_ON_ONCE(count != nr_nodes);
WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
+
+ check_postorder(nr_nodes);
}
static void check_augmented(int nr_nodes)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 37d9edcd14c..ce682f7a4f2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -652,7 +652,7 @@ int pdflush_proc_obsolete(struct ctl_table *table, int write,
{
char kbuf[] = "0\n";
- if (*ppos) {
+ if (*ppos || *lenp < sizeof(kbuf)) {
*lenp = 0;
return 0;
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 05ccb4cc0bd..c43789388cd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1131,6 +1131,9 @@ void compact_pgdat(pg_data_t *pgdat, int order)
.sync = false,
};
+ if (!order)
+ return;
+
__compact_pgdat(pgdat, &cc);
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 731a2c24532..e607728db4a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
if (error)
goto out;
- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
if (error == 0) {
page_cache_get(page);
page->mapping = mapping;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a92012a7170..963e14c0486 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -417,7 +417,7 @@ static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -444,7 +444,7 @@ static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -470,7 +470,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
int err;
unsigned long pages;
- err = strict_strtoul(buf, 10, &pages);
+ err = kstrtoul(buf, 10, &pages);
if (err || !pages || pages > UINT_MAX)
return -EINVAL;
@@ -538,7 +538,7 @@ static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
int err;
unsigned long max_ptes_none;
- err = strict_strtoul(buf, 10, &max_ptes_none);
+ err = kstrtoul(buf, 10, &max_ptes_none);
if (err || max_ptes_none > HPAGE_PMD_NR-1)
return -EINVAL;
@@ -2296,6 +2296,8 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out;
vma = find_vma(mm, address);
+ if (!vma)
+ goto out;
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b60f33080a2..b49579c7f2a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -21,6 +21,7 @@
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/page-isolation.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -33,7 +34,6 @@
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_max_hstate __read_mostly;
@@ -48,7 +48,8 @@ static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
/*
- * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
+ * free_huge_pages, and surplus_huge_pages.
*/
DEFINE_SPINLOCK(hugetlb_lock);
@@ -135,9 +136,9 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
* across the pages in a mapping.
*
* The region data structures are protected by a combination of the mmap_sem
- * and the hugetlb_instantion_mutex. To access or modify a region the caller
+ * and the hugetlb_instantiation_mutex. To access or modify a region the caller
* must either hold the mmap_sem for write, or the mmap_sem for read and
- * the hugetlb_instantiation mutex:
+ * the hugetlb_instantiation_mutex:
*
* down_write(&mm->mmap_sem);
* or
@@ -434,25 +435,6 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
return (get_vma_private_data(vma) & flag) != 0;
}
-/* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct hstate *h,
- struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_NORESERVE)
- return;
-
- if (vma->vm_flags & VM_MAYSHARE) {
- /* Shared mappings always use reserves */
- h->resv_huge_pages--;
- } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
- /*
- * Only the process that called mmap() has reserves for
- * private mappings.
- */
- h->resv_huge_pages--;
- }
-}
-
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
@@ -462,12 +444,35 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
}
/* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
+ if (vma->vm_flags & VM_NORESERVE) {
+ /*
+ * This address is already reserved by other process(chg == 0),
+		 * This address is already reserved by another process (chg == 0),
+		 * so we should decrement the reserved count. Without the
+		 * decrement, the reserve count would remain after the inode is
+		 * released, because this allocated page will go into the page
+		 * cache and be regarded as coming from the reserved pool in the
+		 * release step. We currently have no better way to deal with
+		 * this situation, so add a work-around here.
+ if (vma->vm_flags & VM_MAYSHARE && chg == 0)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* Shared mappings always use reserves */
if (vma->vm_flags & VM_MAYSHARE)
return 1;
+
+ /*
+ * Only the process that called mmap() has reserves for
+ * private mappings.
+ */
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
+
return 0;
}
@@ -517,9 +522,15 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
- if (list_empty(&h->hugepage_freelists[nid]))
+ list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
+ if (!is_migrate_isolate_page(page))
+ break;
+ /*
+	 * If no non-isolated free hugepage is found on the list,
+	 * the allocation fails.
+ */
+ if (&h->hugepage_freelists[nid] == &page->lru)
return NULL;
- page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
h->free_huge_pages--;
@@ -527,9 +538,19 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
return page;
}
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ if (hugepages_treat_as_movable || hugepage_migration_support(h))
+ return GFP_HIGHUSER_MOVABLE;
+ else
+ return GFP_HIGHUSER;
+}
+
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
- unsigned long address, int avoid_reserve)
+ unsigned long address, int avoid_reserve,
+ long chg)
{
struct page *page = NULL;
struct mempolicy *mpol;
@@ -539,16 +560,12 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
struct zoneref *z;
unsigned int cpuset_mems_cookie;
-retry_cpuset:
- cpuset_mems_cookie = get_mems_allowed();
- zonelist = huge_zonelist(vma, address,
- htlb_alloc_mask, &mpol, &nodemask);
/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
- if (!vma_has_reserves(vma) &&
+ if (!vma_has_reserves(vma, chg) &&
h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
@@ -556,13 +573,23 @@ retry_cpuset:
if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
+retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = huge_zonelist(vma, address,
+ htlb_alloc_mask(h), &mpol, &nodemask);
+
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+ if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
- if (!avoid_reserve)
- decrement_hugepage_resv_vma(h, vma);
+ if (avoid_reserve)
+ break;
+ if (!vma_has_reserves(vma, chg))
+ break;
+
+ SetPagePrivate(page);
+ h->resv_huge_pages--;
break;
}
}
@@ -574,7 +601,6 @@ retry_cpuset:
return page;
err:
- mpol_cond_put(mpol);
return NULL;
}
@@ -620,15 +646,20 @@ static void free_huge_page(struct page *page)
int nid = page_to_nid(page);
struct hugepage_subpool *spool =
(struct hugepage_subpool *)page_private(page);
+ bool restore_reserve;
set_page_private(page, 0);
page->mapping = NULL;
BUG_ON(page_count(page));
BUG_ON(page_mapcount(page));
+ restore_reserve = PagePrivate(page);
spin_lock(&hugetlb_lock);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
+ if (restore_reserve)
+ h->resv_huge_pages++;
+
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
/* remove the page from active list */
list_del(&page->lru);
@@ -715,7 +746,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
return NULL;
page = alloc_pages_exact_node(nid,
- htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+ htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
if (page) {
@@ -772,33 +803,6 @@ static int hstate_next_node_to_alloc(struct hstate *h,
return nid;
}
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
-{
- struct page *page;
- int start_nid;
- int next_nid;
- int ret = 0;
-
- start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
- page = alloc_fresh_huge_page_node(h, next_nid);
- if (page) {
- ret = 1;
- break;
- }
- next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- } while (next_nid != start_nid);
-
- if (ret)
- count_vm_event(HTLB_BUDDY_PGALLOC);
- else
- count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
-
- return ret;
-}
-
/*
* helper for free_pool_huge_page() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
@@ -817,6 +821,40 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
return nid;
}
+#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
+ for (nr_nodes = nodes_weight(*mask); \
+ nr_nodes > 0 && \
+ ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
+ nr_nodes--)
+
+#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
+ for (nr_nodes = nodes_weight(*mask); \
+ nr_nodes > 0 && \
+ ((node = hstate_next_node_to_free(hs, mask)) || 1); \
+ nr_nodes--)
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+{
+ struct page *page;
+ int nr_nodes, node;
+ int ret = 0;
+
+ for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+ page = alloc_fresh_huge_page_node(h, node);
+ if (page) {
+ ret = 1;
+ break;
+ }
+ }
+
+ if (ret)
+ count_vm_event(HTLB_BUDDY_PGALLOC);
+ else
+ count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
+ return ret;
+}
+
/*
* Free huge page from pool from next node to free.
* Attempt to keep persistent huge pages more or less
@@ -826,40 +864,73 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
bool acct_surplus)
{
- int start_nid;
- int next_nid;
+ int nr_nodes, node;
int ret = 0;
- start_nid = hstate_next_node_to_free(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
+ for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
/*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
- if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
- !list_empty(&h->hugepage_freelists[next_nid])) {
+ if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
+ !list_empty(&h->hugepage_freelists[node])) {
struct page *page =
- list_entry(h->hugepage_freelists[next_nid].next,
+ list_entry(h->hugepage_freelists[node].next,
struct page, lru);
list_del(&page->lru);
h->free_huge_pages--;
- h->free_huge_pages_node[next_nid]--;
+ h->free_huge_pages_node[node]--;
if (acct_surplus) {
h->surplus_huge_pages--;
- h->surplus_huge_pages_node[next_nid]--;
+ h->surplus_huge_pages_node[node]--;
}
update_and_free_page(h, page);
ret = 1;
break;
}
- next_nid = hstate_next_node_to_free(h, nodes_allowed);
- } while (next_nid != start_nid);
+ }
return ret;
}
+/*
+ * Dissolve a given free hugepage into free buddy pages. This function does
+ * nothing for in-use (including surplus) hugepages.
+ */
+static void dissolve_free_huge_page(struct page *page)
+{
+ spin_lock(&hugetlb_lock);
+ if (PageHuge(page) && !page_count(page)) {
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+ list_del(&page->lru);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[nid]--;
+ update_and_free_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+}
+
+/*
+ * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
+ * make specified memory blocks removable from the system.
+ * Note that start_pfn should be aligned to the (minimum) hugepage size.
+ */
+void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned int order = 8 * sizeof(void *);
+ unsigned long pfn;
+ struct hstate *h;
+
+ /* Set scan step to minimum hugepage size */
+ for_each_hstate(h)
+ if (order > huge_page_order(h))
+ order = huge_page_order(h);
+ VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+ dissolve_free_huge_page(pfn_to_page(pfn));
+}
+
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
struct page *page;
@@ -902,12 +973,12 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
spin_unlock(&hugetlb_lock);
if (nid == NUMA_NO_NODE)
- page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+ page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
else
page = alloc_pages_exact_node(nid,
- htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+ htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
if (page && arch_prepare_hugepage(page)) {
@@ -944,10 +1015,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
*/
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
- struct page *page;
+ struct page *page = NULL;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page_node(h, nid);
+ if (h->free_huge_pages - h->resv_huge_pages > 0)
+ page = dequeue_huge_page_node(h, nid);
spin_unlock(&hugetlb_lock);
if (!page)
@@ -1035,11 +1107,8 @@ free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
- if (!list_empty(&surplus_list)) {
- list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
- put_page(page);
- }
- }
+ list_for_each_entry_safe(page, tmp, &surplus_list, lru)
+ put_page(page);
spin_lock(&hugetlb_lock);
return ret;
@@ -1106,9 +1175,9 @@ static long vma_needs_reservation(struct hstate *h,
} else {
long err;
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
- err = region_chg(&reservations->regions, idx, idx + 1);
+ err = region_chg(&resv->regions, idx, idx + 1);
if (err < 0)
return err;
return 0;
@@ -1126,10 +1195,10 @@ static void vma_commit_reservation(struct hstate *h,
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
/* Mark this page used in the map. */
- region_add(&reservations->regions, idx, idx + 1);
+ region_add(&resv->regions, idx, idx + 1);
}
}
@@ -1155,38 +1224,35 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
return ERR_PTR(-ENOMEM);
- if (chg)
- if (hugepage_subpool_get_pages(spool, chg))
+ if (chg || avoid_reserve)
+ if (hugepage_subpool_get_pages(spool, 1))
return ERR_PTR(-ENOSPC);
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
if (ret) {
- hugepage_subpool_put_pages(spool, chg);
+ if (chg || avoid_reserve)
+ hugepage_subpool_put_pages(spool, 1);
return ERR_PTR(-ENOSPC);
}
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
- if (page) {
- /* update page cgroup details */
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
- h_cg, page);
- spin_unlock(&hugetlb_lock);
- } else {
+ page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
+ if (!page) {
spin_unlock(&hugetlb_lock);
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugetlb_cgroup_uncharge_cgroup(idx,
pages_per_huge_page(h),
h_cg);
- hugepage_subpool_put_pages(spool, chg);
+ if (chg || avoid_reserve)
+ hugepage_subpool_put_pages(spool, 1);
return ERR_PTR(-ENOSPC);
}
spin_lock(&hugetlb_lock);
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
- h_cg, page);
list_move(&page->lru, &h->hugepage_activelist);
- spin_unlock(&hugetlb_lock);
+ /* Fall through */
}
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+ spin_unlock(&hugetlb_lock);
set_page_private(page, (unsigned long)spool);
@@ -1194,17 +1260,29 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
return page;
}
+/*
+ * A wrapper for alloc_huge_page() that simply returns the page if the
+ * allocation succeeds, and NULL otherwise. This function is called from
+ * new_vma_page(), where no error pointer is expected to be returned.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+{
+ struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+ if (IS_ERR(page))
+ page = NULL;
+ return page;
+}
+
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_states[N_MEMORY]);
+ int nr_nodes, node;
- while (nr_nodes) {
+ for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
void *addr;
- addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(hstate_next_node_to_alloc(h,
- &node_states[N_MEMORY])),
+ addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
@@ -1216,7 +1294,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
m = addr;
goto found;
}
- nr_nodes--;
}
return 0;
@@ -1355,48 +1432,28 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count,
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{
- int start_nid, next_nid;
- int ret = 0;
+ int nr_nodes, node;
VM_BUG_ON(delta != -1 && delta != 1);
- if (delta < 0)
- start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
- else
- start_nid = hstate_next_node_to_free(h, nodes_allowed);
- next_nid = start_nid;
-
- do {
- int nid = next_nid;
- if (delta < 0) {
- /*
- * To shrink on this node, there must be a surplus page
- */
- if (!h->surplus_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_alloc(h,
- nodes_allowed);
- continue;
- }
+ if (delta < 0) {
+ for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+ if (h->surplus_huge_pages_node[node])
+ goto found;
}
- if (delta > 0) {
- /*
- * Surplus cannot exceed the total number of pages
- */
- if (h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid]) {
- next_nid = hstate_next_node_to_free(h,
- nodes_allowed);
- continue;
- }
+ } else {
+ for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+ if (h->surplus_huge_pages_node[node] <
+ h->nr_huge_pages_node[node])
+ goto found;
}
+ }
+ return 0;
- h->surplus_huge_pages += delta;
- h->surplus_huge_pages_node[nid] += delta;
- ret = 1;
- break;
- } while (next_nid != start_nid);
-
- return ret;
+found:
+ h->surplus_huge_pages += delta;
+ h->surplus_huge_pages_node[node] += delta;
+ return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
@@ -1526,7 +1583,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct hstate *h;
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
- err = strict_strtoul(buf, 10, &count);
+ err = kstrtoul(buf, 10, &count);
if (err)
goto out;
@@ -1617,7 +1674,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
if (h->order >= MAX_ORDER)
return -EINVAL;
- err = strict_strtoul(buf, 10, &input);
+ err = kstrtoul(buf, 10, &input);
if (err)
return err;
@@ -2068,18 +2125,6 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
}
#endif /* CONFIG_NUMA */
-int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *length, loff_t *ppos)
-{
- proc_dointvec(table, write, buffer, length, ppos);
- if (hugepages_treat_as_movable)
- htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
- else
- htlb_alloc_mask = GFP_HIGHUSER;
- return 0;
-}
-
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
@@ -2207,7 +2252,7 @@ out:
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
/*
	 * This new VMA should share its sibling's reservation map if present.
@@ -2217,34 +2262,34 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
* after this open call completes. It is therefore safe to take a
* new reference here without additional locking.
*/
- if (reservations)
- kref_get(&reservations->refs);
+ if (resv)
+ kref_get(&resv->refs);
}
static void resv_map_put(struct vm_area_struct *vma)
{
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
- if (!reservations)
+ if (!resv)
return;
- kref_put(&reservations->refs, resv_map_release);
+ kref_put(&resv->refs, resv_map_release);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
- struct resv_map *reservations = vma_resv_map(vma);
+ struct resv_map *resv = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve;
unsigned long start;
unsigned long end;
- if (reservations) {
+ if (resv) {
start = vma_hugecache_offset(h, vma, vma->vm_start);
end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) -
- region_count(&reservations->regions, start, end);
+ region_count(&resv->regions, start, end);
resv_map_put(vma);
@@ -2557,7 +2602,6 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct hstate *h = hstate_vma(vma);
struct page *old_page, *new_page;
- int avoidcopy;
int outside_reserve = 0;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -2567,10 +2611,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
- avoidcopy = (page_mapcount(old_page) == 1);
- if (avoidcopy) {
- if (PageAnon(old_page))
- page_move_anon_rmap(old_page, vma, address);
+ if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
+ page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -2584,8 +2626,7 @@ retry_avoidcopy:
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
- if (!(vma->vm_flags & VM_MAYSHARE) &&
- is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
+ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
@@ -2657,6 +2698,8 @@ retry_avoidcopy:
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+ ClearPagePrivate(new_page);
+
/* Break COW */
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
@@ -2668,10 +2711,11 @@ retry_avoidcopy:
}
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
- /* Caller expects lock to be held */
- spin_lock(&mm->page_table_lock);
page_cache_release(new_page);
page_cache_release(old_page);
+
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return 0;
}
@@ -2767,6 +2811,7 @@ retry:
goto retry;
goto out;
}
+ ClearPagePrivate(page);
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
@@ -2813,8 +2858,10 @@ retry:
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
- if (anon_rmap)
+ if (anon_rmap) {
+ ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, vma, address);
+ }
else
page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -3431,3 +3478,45 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
return ret;
}
#endif
+
+bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+ VM_BUG_ON(!PageHead(page));
+ if (!get_page_unless_zero(page))
+ return false;
+ spin_lock(&hugetlb_lock);
+ list_move_tail(&page->lru, list);
+ spin_unlock(&hugetlb_lock);
+ return true;
+}
+
+void putback_active_hugepage(struct page *page)
+{
+ VM_BUG_ON(!PageHead(page));
+ spin_lock(&hugetlb_lock);
+ list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+ spin_unlock(&hugetlb_lock);
+ put_page(page);
+}
+
+bool is_hugepage_active(struct page *page)
+{
+ VM_BUG_ON(!PageHuge(page));
+ /*
+ * This function can be called for a tail page because the caller,
+ * scan_movable_pages, scans through a given pfn-range which typically
+ * covers one memory block. On systems using gigantic hugepages (1GB
+ * on x86_64) a hugepage is larger than a memory block, and we don't
+ * support migrating such large hugepages for now, so return false
+ * when called for tail pages.
+ */
+ if (PageTail(page))
+ return false;
+ /*
+ * The refcount of hwpoisoned hugepages is 1, but they are not active,
+ * so we should return false for them.
+ */
+ if (unlikely(PageHWPoison(page)))
+ return false;
+ return page_count(page) > 0;
+}
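
The three helpers above give hugepage callers the same isolate/migrate/putback pattern that isolate_lru_page()/putback_lru_page() provide for LRU pages. A minimal kernel-style sketch of the pairing, mirroring soft_offline_huge_page() further down; new_hugepage() is a hypothetical allocation callback, not a kernel API:

	LIST_HEAD(pagelist);
	int ret;

	if (isolate_huge_page(hpage, &pagelist)) {
		/* hpage now sits on our private list, refcount held */
		ret = migrate_pages(&pagelist, new_hugepage, 0,
				    MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret)
			/* migration failed: requeue on the active list */
			putback_active_hugepage(hpage);
	}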
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 3a61efc518d..afc2daa91c6 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -88,12 +88,12 @@ static int pfn_inject_init(void)
* hardware status change, hence do not require hardware support.
* They are mainly for testing hwpoison in software level.
*/
- dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+ dentry = debugfs_create_file("corrupt-pfn", 0200, hwpoison_dir,
NULL, &hwpoison_fops);
if (!dentry)
goto fail;
- dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir,
+ dentry = debugfs_create_file("unpoison-pfn", 0200, hwpoison_dir,
NULL, &unpoison_fops);
if (!dentry)
goto fail;
diff --git a/mm/internal.h b/mm/internal.h
index 4390ac6c106..684f7aa9692 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -85,6 +85,8 @@ extern unsigned long highest_memmap_pfn;
*/
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+extern bool zone_reclaimable(struct zone *zone);
/*
* in mm/rmap.c:
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c8d7f3110fd..e126b0ef9ad 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1639,7 +1639,7 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
else if (strncmp(buf, "scan=", 5) == 0) {
unsigned long secs;
- ret = strict_strtoul(buf + 5, 0, &secs);
+ ret = kstrtoul(buf + 5, 0, &secs);
if (ret < 0)
goto out;
stop_scan_thread();
diff --git a/mm/ksm.c b/mm/ksm.c
index b6afe0c440d..0bea2b262a4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2194,7 +2194,7 @@ static ssize_t sleep_millisecs_store(struct kobject *kobj,
unsigned long msecs;
int err;
- err = strict_strtoul(buf, 10, &msecs);
+ err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
@@ -2217,7 +2217,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
int err;
unsigned long nr_pages;
- err = strict_strtoul(buf, 10, &nr_pages);
+ err = kstrtoul(buf, 10, &nr_pages);
if (err || nr_pages > UINT_MAX)
return -EINVAL;
@@ -2239,7 +2239,7 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
int err;
unsigned long flags;
- err = strict_strtoul(buf, 10, &flags);
+ err = kstrtoul(buf, 10, &flags);
if (err || flags > UINT_MAX)
return -EINVAL;
if (flags > KSM_RUN_UNMERGE)
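
strict_strtoul() was a deprecated wrapper around kstrtoul(), so the conversions in kmemleak and ksm above are mechanical. kstrtoul() returns 0 on success and a negative errno (-EINVAL or -ERANGE) otherwise, and writes the parsed value only on success; a sketch of the checking pattern these sysfs handlers share:

	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);	/* base 10, as in the ksm handlers */
	if (err || val > UINT_MAX)	/* reject parse errors and overflow */
		return -EINVAL;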
diff --git a/mm/madvise.c b/mm/madvise.c
index 7055883e6e2..6975bc81254 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -42,11 +42,11 @@ static int madvise_need_mmap_write(int behavior)
* We can potentially split a vm area into separate
* areas, each area with its own behavior.
*/
-static long madvise_behavior(struct vm_area_struct * vma,
+static long madvise_behavior(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end, int behavior)
{
- struct mm_struct * mm = vma->vm_mm;
+ struct mm_struct *mm = vma->vm_mm;
int error = 0;
pgoff_t pgoff;
unsigned long new_flags = vma->vm_flags;
@@ -215,8 +215,8 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
/*
* Schedule all required I/O operations. Do not wait for completion.
*/
-static long madvise_willneed(struct vm_area_struct * vma,
- struct vm_area_struct ** prev,
+static long madvise_willneed(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
struct file *file = vma->vm_file;
@@ -270,8 +270,8 @@ static long madvise_willneed(struct vm_area_struct * vma,
* An interface that causes the system to free clean pages and flush
* dirty pages is already available as msync(MS_INVALIDATE).
*/
-static long madvise_dontneed(struct vm_area_struct * vma,
- struct vm_area_struct ** prev,
+static long madvise_dontneed(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
*prev = vma;
@@ -343,29 +343,34 @@ static long madvise_remove(struct vm_area_struct *vma,
*/
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
- int ret = 0;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
for (; start < end; start += PAGE_SIZE) {
struct page *p;
- int ret = get_user_pages_fast(start, 1, 0, &p);
+ int ret;
+
+ ret = get_user_pages_fast(start, 1, 0, &p);
if (ret != 1)
return ret;
+
+ if (PageHWPoison(p)) {
+ put_page(p);
+ continue;
+ }
if (bhv == MADV_SOFT_OFFLINE) {
- printk(KERN_INFO "Soft offlining page %lx at %lx\n",
+ pr_info("Soft offlining page %#lx at %#lx\n",
page_to_pfn(p), start);
ret = soft_offline_page(p, MF_COUNT_INCREASED);
if (ret)
- break;
+ return ret;
continue;
}
- printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
+ pr_info("Injecting memory failure for page %#lx at %#lx\n",
page_to_pfn(p), start);
/* Ignore return value for now */
memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
}
- return ret;
+ return 0;
}
#endif
@@ -459,7 +464,7 @@ madvise_behavior_valid(int behavior)
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
unsigned long end, tmp;
- struct vm_area_struct * vma, *prev;
+ struct vm_area_struct *vma, *prev;
int unmapped_error = 0;
int error = -EINVAL;
int write;
diff --git a/mm/memblock.c b/mm/memblock.c
index a847bfe6f3b..0ac412a0a7e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -914,6 +914,24 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
return memblock_search(&memblock.memory, addr) != -1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
+ unsigned long *start_pfn, unsigned long *end_pfn)
+{
+ struct memblock_type *type = &memblock.memory;
+ int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);
+
+ if (mid == -1)
+ return -1;
+
+ *start_pfn = type->regions[mid].base >> PAGE_SHIFT;
+ *end_pfn = (type->regions[mid].base + type->regions[mid].size)
+ >> PAGE_SHIFT;
+
+ return type->regions[mid].nid;
+}
+#endif
+
/**
* memblock_is_region_memory - check if a region is a subset of memory
* @base: base of region to check
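
memblock_search_pfn_nid() maps a pfn to the memblock.memory region containing it and reports that region's node id and pfn span, returning -1 when the pfn is not covered. A hedged usage sketch; cache_node_range() is a hypothetical consumer:

	unsigned long start_pfn, end_pfn;
	int nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid >= 0)
		/* every pfn in [start_pfn, end_pfn) belongs to node nid */
		cache_node_range(nid, start_pfn, end_pfn);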
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3b83957b643..c6bd28edd53 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3121,7 +3121,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
ssize_t size = memcg_caches_array_size(num_groups);
size *= sizeof(void *);
- size += sizeof(struct memcg_cache_params);
+ size += offsetof(struct memcg_cache_params, memcg_caches);
s->memcg_params = kzalloc(size, GFP_KERNEL);
if (!s->memcg_params) {
@@ -3164,13 +3164,16 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache)
{
- size_t size = sizeof(struct memcg_cache_params);
+ size_t size;
if (!memcg_kmem_enabled())
return 0;
- if (!memcg)
+ if (!memcg) {
+ size = offsetof(struct memcg_cache_params, memcg_caches);
size += memcg_limited_groups_array_size * sizeof(void *);
+ } else
+ size = sizeof(struct memcg_cache_params);
s->memcg_params = kzalloc(size, GFP_KERNEL);
if (!s->memcg_params)
@@ -5588,7 +5591,13 @@ static int compare_thresholds(const void *a, const void *b)
const struct mem_cgroup_threshold *_a = a;
const struct mem_cgroup_threshold *_b = b;
- return _a->threshold - _b->threshold;
+ if (_a->threshold > _b->threshold)
+ return 1;
+
+ if (_a->threshold < _b->threshold)
+ return -1;
+
+ return 0;
}
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
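
The compare_thresholds() change fixes a classic comparator bug: the thresholds are u64, so returning `_a->threshold - _b->threshold` truncates the difference to int and can change sign, corrupting the sort order for large values. The overflow-safe pattern works for any integer width; a standalone, compilable illustration:

	#include <stdint.h>

	/* Overflow-safe three-way compare: never forms a - b. */
	static int cmp_u64(const void *a, const void *b)
	{
		uint64_t x = *(const uint64_t *)a;
		uint64_t y = *(const uint64_t *)b;

		return (x > y) - (x < y);	/* 1, -1 or 0 */
	}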
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d84c5e5331b..d472e14c680 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -206,7 +206,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
- si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
+ si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
if ((flags & MF_ACTION_REQUIRED) && t == current) {
si.si_code = BUS_MCEERR_AR;
@@ -983,7 +983,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
static void set_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_trans_order(hpage);
+ int nr_pages = 1 << compound_order(hpage);
for (i = 0; i < nr_pages; i++)
SetPageHWPoison(hpage + i);
}
@@ -991,7 +991,7 @@ static void set_page_hwpoison_huge_page(struct page *hpage)
static void clear_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_trans_order(hpage);
+ int nr_pages = 1 << compound_order(hpage);
for (i = 0; i < nr_pages; i++)
ClearPageHWPoison(hpage + i);
}
@@ -1204,6 +1204,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
for (ps = error_states;; ps++)
if ((p->flags & ps->mask) == ps->res)
break;
+
+ page_flags |= (p->flags & (1UL << PG_dirty));
+
if (!ps->mask)
for (ps = error_states;; ps++)
if ((page_flags & ps->mask) == ps->res)
@@ -1339,7 +1342,17 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
- nr_pages = 1 << compound_trans_order(page);
+ /*
+ * unpoison_memory() can encounter a thp only while the thp is being
+ * handled by memory_failure() and the page lock is not held yet.
+ * In that case, we yield to memory_failure() and make unpoison fail.
+ */
+ if (PageTransHuge(page)) {
+ pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
+ return 0;
+ }
+
+ nr_pages = 1 << compound_order(page);
if (!get_page_unless_zero(page)) {
/*
@@ -1353,7 +1366,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
if (TestClearPageHWPoison(p))
- atomic_long_sub(nr_pages, &num_poisoned_pages);
+ atomic_long_dec(&num_poisoned_pages);
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
@@ -1375,7 +1388,7 @@ int unpoison_memory(unsigned long pfn)
unlock_page(page);
put_page(page);
- if (freeit)
+ if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
put_page(page);
return 0;
@@ -1416,7 +1429,8 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
* was free. This flag should be kept set until the source page
* is freed and PG_hwpoison on it is set.
*/
- set_migratetype_isolate(p, true);
+ if (get_pageblock_migratetype(p) != MIGRATE_ISOLATE)
+ set_migratetype_isolate(p, true);
/*
* When the target page is a free hugepage, just remove it
* from free hugepage list.
@@ -1470,6 +1484,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
int ret;
unsigned long pfn = page_to_pfn(page);
struct page *hpage = compound_head(page);
+ LIST_HEAD(pagelist);
/*
* This double-check of PageHWPoison is to avoid the race with
@@ -1485,86 +1500,29 @@ static int soft_offline_huge_page(struct page *page, int flags)
unlock_page(hpage);
/* Keep page count to indicate a given hugepage is isolated. */
- ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
- MIGRATE_SYNC);
- put_page(hpage);
+ list_move(&hpage->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+ MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
+ /*
+ * We know that soft_offline_huge_page() tries to migrate
+ * only the single hugepage pointed to by hpage, so we need not
+ * run through the pagelist here.
+ */
+ putback_active_hugepage(hpage);
+ if (ret > 0)
+ ret = -EIO;
} else {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_trans_order(hpage),
+ atomic_long_add(1 << compound_order(hpage),
&num_poisoned_pages);
}
return ret;
}
-static int __soft_offline_page(struct page *page, int flags);
-
-/**
- * soft_offline_page - Soft offline a page.
- * @page: page to offline
- * @flags: flags. Same as memory_failure().
- *
- * Returns 0 on success, otherwise negated errno.
- *
- * Soft offline a page, by migration or invalidation,
- * without killing anything. This is for the case when
- * a page is not corrupted yet (so it's still valid to access),
- * but has had a number of corrected errors and is better taken
- * out.
- *
- * The actual policy on when to do that is maintained by
- * user space.
- *
- * This should never impact any application or cause data loss,
- * however it might take some time.
- *
- * This is not a 100% solution for all memory, but tries to be
- * ``good enough'' for the majority of memory.
- */
-int soft_offline_page(struct page *page, int flags)
-{
- int ret;
- unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_trans_head(page);
-
- if (PageHWPoison(page)) {
- pr_info("soft offline: %#lx page already poisoned\n", pfn);
- return -EBUSY;
- }
- if (!PageHuge(page) && PageTransHuge(hpage)) {
- if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
- pr_info("soft offline: %#lx: failed to split THP\n",
- pfn);
- return -EBUSY;
- }
- }
-
- ret = get_any_page(page, pfn, flags);
- if (ret < 0)
- return ret;
- if (ret) { /* for in-use pages */
- if (PageHuge(page))
- ret = soft_offline_huge_page(page, flags);
- else
- ret = __soft_offline_page(page, flags);
- } else { /* for free pages */
- if (PageHuge(page)) {
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_trans_order(hpage),
- &num_poisoned_pages);
- } else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
- }
- }
- unset_migratetype_isolate(page, MIGRATE_MOVABLE);
- return ret;
-}
-
static int __soft_offline_page(struct page *page, int flags)
{
int ret;
@@ -1651,3 +1609,67 @@ static int __soft_offline_page(struct page *page, int flags)
}
return ret;
}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_trans_head(page);
+
+ if (PageHWPoison(page)) {
+ pr_info("soft offline: %#lx page already poisoned\n", pfn);
+ return -EBUSY;
+ }
+ if (!PageHuge(page) && PageTransHuge(hpage)) {
+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+ pr_info("soft offline: %#lx: failed to split THP\n",
+ pfn);
+ return -EBUSY;
+ }
+ }
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+ goto unset;
+ if (ret) { /* for in-use pages */
+ if (PageHuge(page))
+ ret = soft_offline_huge_page(page, flags);
+ else
+ ret = __soft_offline_page(page, flags);
+ } else { /* for free pages */
+ if (PageHuge(page)) {
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ atomic_long_add(1 << compound_order(hpage),
+ &num_poisoned_pages);
+ } else {
+ SetPageHWPoison(page);
+ atomic_long_inc(&num_poisoned_pages);
+ }
+ }
+unset:
+ unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+ return ret;
+}
diff --git a/mm/memory.c b/mm/memory.c
index b3c6bf9a398..2b73dbde227 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -373,30 +373,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
- * If a p?d_bad entry is found while walking page tables, report
- * the error, before resetting entry to p?d_none. Usually (but
- * very seldom) called out from the p?d_none_or_clear_bad macros.
- */
-
-void pgd_clear_bad(pgd_t *pgd)
-{
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
-}
-
-void pud_clear_bad(pud_t *pud)
-{
- pud_ERROR(*pud);
- pud_clear(pud);
-}
-
-void pmd_clear_bad(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
-}
-
-/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
@@ -1505,7 +1481,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pud_none(*pud))
goto no_page_table;
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- BUG_ON(flags & FOLL_GET);
+ if (flags & FOLL_GET)
+ goto out;
page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
goto out;
}
@@ -1516,8 +1493,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pmd_none(*pmd))
goto no_page_table;
if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
- BUG_ON(flags & FOLL_GET);
page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+ if (flags & FOLL_GET) {
+ /*
+ * Refcounts on tail pages are not well-defined and
+ * shouldn't be taken. The caller should handle a NULL
+ * return when trying to follow tail pages.
+ */
+ if (PageHead(page))
+ get_page(page);
+ else {
+ page = NULL;
+ goto out;
+ }
+ }
goto out;
}
if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
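
With this change follow_page_mask() honours FOLL_GET for hugetlb pmd mappings instead of hitting a BUG, but it deliberately returns NULL rather than taking a reference on a huge tail page. A sketch of the caller-side contract this implies (munlock_vma_pages_range() in mm/mlock.c below follows it):

	page = follow_page_mask(vma, addr, FOLL_GET | FOLL_DUMP, &page_mask);
	if (!page || IS_ERR(page))
		/* covers the new NULL result for FOLL_GET on a tail page */
		goto next;
	/* ... use page ... */
	put_page(page);		/* drop the FOLL_GET reference */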
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ca1dd3aa5ee..0eb1a1df649 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -30,6 +30,7 @@
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
+#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
@@ -194,7 +195,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
zone = &pgdat->node_zones[0];
for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
- if (zone->wait_table) {
+ if (zone_is_initialized(zone)) {
nr_pages = zone->wait_table_hash_nr_entries
* sizeof(wait_queue_head_t);
nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
@@ -229,8 +230,8 @@ static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
zone_span_writelock(zone);
- old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
+ old_zone_end_pfn = zone_end_pfn(zone);
+ if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
zone->zone_start_pfn = start_pfn;
zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
@@ -305,7 +306,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
goto out_fail;
/* use start_pfn for z1's start_pfn if z1 is empty */
- if (z1->spanned_pages)
+ if (!zone_is_empty(z1))
z1_start_pfn = z1->zone_start_pfn;
else
z1_start_pfn = start_pfn;
@@ -347,7 +348,7 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
goto out_fail;
/* use end_pfn for z2's end_pfn if z2 is empty */
- if (z2->spanned_pages)
+ if (!zone_is_empty(z2))
z2_end_pfn = zone_end_pfn(z2);
else
z2_end_pfn = end_pfn;
@@ -514,8 +515,9 @@ static int find_biggest_section_pfn(int nid, struct zone *zone,
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
unsigned long end_pfn)
{
- unsigned long zone_start_pfn = zone->zone_start_pfn;
- unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ unsigned long zone_start_pfn = zone->zone_start_pfn;
+ unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
+ unsigned long zone_end_pfn = z;
unsigned long pfn;
struct mem_section *ms;
int nid = zone_to_nid(zone);
@@ -1069,6 +1071,23 @@ out:
return ret;
}
+static int check_hotplug_memory_range(u64 start, u64 size)
+{
+ u64 start_pfn = start >> PAGE_SHIFT;
+ u64 nr_pages = size >> PAGE_SHIFT;
+
+ /* Memory range must be aligned with section */
+ if ((start_pfn & ~PAGE_SECTION_MASK) ||
+ (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
+ pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
+ (unsigned long long)start,
+ (unsigned long long)size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
@@ -1078,6 +1097,10 @@ int __ref add_memory(int nid, u64 start, u64 size)
struct resource *res;
int ret;
+ ret = check_hotplug_memory_range(start, size);
+ if (ret)
+ return ret;
+
lock_memory_hotplug();
res = register_memory_resource(start, size);
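
check_hotplug_memory_range() requires both the start and the size to be whole memory sections. With the usual x86_64 section size of 128MB (SECTION_SIZE_BITS = 27) the test reduces to a power-of-two alignment check; a standalone sketch with that assumed constant, where the kernel itself uses PAGE_SECTION_MASK and PAGES_PER_SECTION:

	#include <stdint.h>

	#define SECTION_SIZE (1ULL << 27)	/* assumed x86_64 value: 128MB */

	static int range_is_section_aligned(uint64_t start, uint64_t size)
	{
		return size != 0 &&
		       (start & (SECTION_SIZE - 1)) == 0 &&
		       (size & (SECTION_SIZE - 1)) == 0;
	}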
@@ -1208,10 +1231,12 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
}
/*
- * Scanning pfn is much easier than scanning lru list.
- * Scan pfn from start to end and Find LRU page.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
+ * and hugepages). We scan by pfn because it's much easier than walking a
+ * linked list. This function returns the pfn of the first movable page
+ * found, or 0 if none is found.
*/
-static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
+static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
struct page *page;
@@ -1220,6 +1245,13 @@ static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
page = pfn_to_page(pfn);
if (PageLRU(page))
return pfn;
+ if (PageHuge(page)) {
+ if (is_hugepage_active(page))
+ return pfn;
+ else
+ pfn = round_up(pfn + 1,
+ 1 << compound_order(page)) - 1;
+ }
}
}
return 0;
@@ -1240,6 +1272,19 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
+
+ if (PageHuge(page)) {
+ struct page *head = compound_head(page);
+ pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+ if (compound_order(head) > PFN_SECTION_SHIFT) {
+ ret = -EBUSY;
+ break;
+ }
+ if (isolate_huge_page(page, &source))
+ move_pages -= 1 << compound_order(head);
+ continue;
+ }
+
if (!get_page_unless_zero(page))
continue;
/*
@@ -1272,7 +1317,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
}
if (!list_empty(&source)) {
if (not_managed) {
- putback_lru_pages(&source);
+ putback_movable_pages(&source);
goto out;
}
@@ -1283,7 +1328,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
ret = migrate_pages(&source, alloc_migrate_target, 0,
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
if (ret)
- putback_lru_pages(&source);
+ putback_movable_pages(&source);
}
out:
return ret;
@@ -1472,7 +1517,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
struct zone *zone;
struct memory_notify arg;
- BUG_ON(start_pfn >= end_pfn);
/* at least, alignment against pageblock is necessary */
if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
return -EINVAL;
@@ -1527,8 +1571,8 @@ repeat:
drain_all_pages();
}
- pfn = scan_lru_pages(start_pfn, end_pfn);
- if (pfn) { /* We have page on LRU */
+ pfn = scan_movable_pages(start_pfn, end_pfn);
+ if (pfn) { /* We have movable pages */
ret = do_migrate_range(pfn, end_pfn);
if (!ret) {
drain = 1;
@@ -1547,6 +1591,11 @@ repeat:
yield();
/* drain pcp pages, this is synchronous. */
drain_all_pages();
+ /*
+ * Dissolve free hugepages in the memory block before actually
+ * offlining it, to keep hugetlbfs's object counting consistent.
+ */
+ dissolve_free_huge_pages(start_pfn, end_pfn);
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
@@ -1674,9 +1723,8 @@ static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
return ret;
}
-static int check_cpu_on_node(void *data)
+static int check_cpu_on_node(pg_data_t *pgdat)
{
- struct pglist_data *pgdat = data;
int cpu;
for_each_present_cpu(cpu) {
@@ -1691,10 +1739,9 @@ static int check_cpu_on_node(void *data)
return 0;
}
-static void unmap_cpu_on_node(void *data)
+static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
- struct pglist_data *pgdat = data;
int cpu;
for_each_possible_cpu(cpu)
@@ -1703,10 +1750,11 @@ static void unmap_cpu_on_node(void *data)
#endif
}
-static int check_and_unmap_cpu_on_node(void *data)
+static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
- int ret = check_cpu_on_node(data);
+ int ret;
+ ret = check_cpu_on_node(pgdat);
if (ret)
return ret;
@@ -1715,11 +1763,18 @@ static int check_and_unmap_cpu_on_node(void *data)
* the cpu_to_node() now.
*/
- unmap_cpu_on_node(data);
+ unmap_cpu_on_node(pgdat);
return 0;
}
-/* offline the node if all memory sections of this node are removed */
+/**
+ * try_offline_node
+ *
+ * Offline a node if all memory sections and cpus of the node are removed.
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call.
+ */
void try_offline_node(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
@@ -1745,7 +1800,7 @@ void try_offline_node(int nid)
return;
}
- if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
+ if (check_and_unmap_cpu_on_node(pgdat))
return;
/*
@@ -1782,10 +1837,19 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
+/**
+ * remove_memory
+ *
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations before this call, as required by
+ * try_offline_node().
+ */
void __ref remove_memory(int nid, u64 start, u64 size)
{
int ret;
+ BUG_ON(check_hotplug_memory_range(start, size));
+
lock_memory_hotplug();
/*
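
Both kernel-doc notes above shift serialization to the caller. A hedged sketch of the calling sequence a hot-remove path is expected to use, built from the locking functions the comments name:

	lock_device_hotplug();
	remove_memory(nid, start, size);	/* may end in try_offline_node() */
	unlock_device_hotplug();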
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4baf12e534d..04729647f35 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -123,16 +123,19 @@ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
static struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
- int node;
if (!pol) {
- node = numa_node_id();
- if (node != NUMA_NO_NODE)
- pol = &preferred_node_policy[node];
+ int node = numa_node_id();
- /* preferred_node_policy is not initialised early in boot */
- if (!pol->mode)
- pol = NULL;
+ if (node != NUMA_NO_NODE) {
+ pol = &preferred_node_policy[node];
+ /*
+ * preferred_node_policy is not initialised early in
+ * boot
+ */
+ if (!pol->mode)
+ pol = NULL;
+ }
}
return pol;
@@ -473,8 +476,11 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
-/* Scan through pages checking if pages follow certain conditions. */
-static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+/*
+ * Scan through pages checking if pages follow certain conditions,
+ * and move them to the pagelist if they do.
+ */
+static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -512,7 +518,31 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return addr != end;
}
-static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+ pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
+ void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ int nid;
+ struct page *page;
+
+ spin_lock(&vma->vm_mm->page_table_lock);
+ page = pte_page(huge_ptep_get((pte_t *)pmd));
+ nid = page_to_nid(page);
+ if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ goto unlock;
+ /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+ if (flags & (MPOL_MF_MOVE_ALL) ||
+ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+ isolate_huge_page(page, private);
+unlock:
+ spin_unlock(&vma->vm_mm->page_table_lock);
+#else
+ BUG();
+#endif
+}
+
+static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -523,17 +553,24 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
+ if (!pmd_present(*pmd))
+ continue;
+ if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+ queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
+ flags, private);
+ continue;
+ }
split_huge_page_pmd(vma, addr, pmd);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
- if (check_pte_range(vma, pmd, addr, next, nodes,
+ if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
flags, private))
return -EIO;
} while (pmd++, addr = next, addr != end);
return 0;
}
-static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -544,16 +581,18 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
+ if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+ continue;
if (pud_none_or_clear_bad(pud))
continue;
- if (check_pmd_range(vma, pud, addr, next, nodes,
+ if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
flags, private))
return -EIO;
} while (pud++, addr = next, addr != end);
return 0;
}
-static inline int check_pgd_range(struct vm_area_struct *vma,
+static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
const nodemask_t *nodes, unsigned long flags,
void *private)
@@ -566,7 +605,7 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- if (check_pud_range(vma, pgd, addr, next, nodes,
+ if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
flags, private))
return -EIO;
} while (pgd++, addr = next, addr != end);
@@ -604,12 +643,14 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
/*
- * Check if all pages in a range are on a set of nodes.
- * If pagelist != NULL then isolate pages from the LRU and
- * put them on the pagelist.
+ * Walk through page tables and collect pages to be migrated.
+ *
+ * If pages found in a given range are on a set of nodes (determined by
+ * @nodes and @flags), they are isolated and queued on the pagelist
+ * passed via @private.
*/
static struct vm_area_struct *
-check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
const nodemask_t *nodes, unsigned long flags, void *private)
{
int err;
@@ -635,9 +676,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return ERR_PTR(-EFAULT);
}
- if (is_vm_hugetlb_page(vma))
- goto next;
-
if (flags & MPOL_MF_LAZY) {
change_prot_numa(vma, start, endvma);
goto next;
@@ -647,7 +685,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
vma_migratable(vma))) {
- err = check_pgd_range(vma, start, endvma, nodes,
+ err = queue_pages_pgd_range(vma, start, endvma, nodes,
flags, private);
if (err) {
first = ERR_PTR(err);
@@ -990,7 +1028,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
- return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+ if (PageHuge(page))
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ node);
+ else
+ return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
@@ -1013,14 +1055,14 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
- check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
return err;
@@ -1154,10 +1196,14 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
break;
vma = vma->vm_next;
}
-
/*
- * if !vma, alloc_page_vma() will use task or system default policy
+ * queue_pages_range() confirms that @page belongs to some vma,
+ * so vma shouldn't be NULL.
*/
+ BUG_ON(!vma);
+
+ if (PageHuge(page))
+ return alloc_huge_page_noerr(vma, address, 1);
return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else
@@ -1249,7 +1295,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (err)
goto mpol_out;
- vma = check_range(mm, start, end, nmask,
+ vma = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
err = PTR_ERR(vma); /* maybe ... */
@@ -1265,7 +1311,7 @@ static long do_mbind(unsigned long start, unsigned long len,
(unsigned long)vma,
MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
if (nr_failed)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
if (nr_failed && (flags & MPOL_MF_STRICT))
@@ -2065,6 +2111,16 @@ retry_cpuset:
}
EXPORT_SYMBOL(alloc_pages_current);
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+ struct mempolicy *pol = mpol_dup(vma_policy(src));
+
+ if (IS_ERR(pol))
+ return PTR_ERR(pol);
+ dst->vm_policy = pol;
+ return 0;
+}
+
/*
* If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
* rebinds the mempolicy its copying by calling mpol_rebind_policy()
diff --git a/mm/mempool.c b/mm/mempool.c
index 54990476c04..659aa42bad1 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -73,7 +73,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
+ pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
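
The mempool change is purely cosmetic: kzalloc_node(size, gfp, nid) is defined as kmalloc_node(size, gfp | __GFP_ZERO, nid), so allocation and zeroing behaviour are unchanged and only the intent becomes explicit:

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	/* == kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id) */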
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c24438bb..b7ded7eafe3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,6 +100,10 @@ void putback_movable_pages(struct list_head *l)
struct page *page2;
list_for_each_entry_safe(page, page2, l, lru) {
+ if (unlikely(PageHuge(page))) {
+ putback_active_hugepage(page);
+ continue;
+ }
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
@@ -945,6 +949,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
struct page *new_hpage = get_new_page(hpage, private, &result);
struct anon_vma *anon_vma = NULL;
+ /*
+ * Movability of hugepages depends on the architecture and the hugepage
+ * size. This check is necessary because some callers of hugepage
+ * migration, like soft offline and memory hotremove, don't walk through
+ * page tables or check whether the hugepage is pmd-based before
+ * kicking off migration.
+ */
+ if (!hugepage_migration_support(page_hstate(hpage)))
+ return -ENOSYS;
+
if (!new_hpage)
return -ENOMEM;
@@ -975,6 +989,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
unlock_page(hpage);
out:
+ if (rc != -EAGAIN)
+ putback_active_hugepage(hpage);
put_page(new_hpage);
if (result) {
if (rc)
@@ -1025,7 +1041,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
list_for_each_entry_safe(page, page2, from, lru) {
cond_resched();
- rc = unmap_and_move(get_new_page, private,
+ if (PageHuge(page))
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, page, pass > 2, mode);
+ else
+ rc = unmap_and_move(get_new_page, private,
page, pass > 2, mode);
switch(rc) {
@@ -1058,32 +1078,6 @@ out:
return rc;
}
-int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
- unsigned long private, enum migrate_mode mode)
-{
- int pass, rc;
-
- for (pass = 0; pass < 10; pass++) {
- rc = unmap_and_move_huge_page(get_new_page, private,
- hpage, pass > 2, mode);
- switch (rc) {
- case -ENOMEM:
- goto out;
- case -EAGAIN:
- /* try again */
- cond_resched();
- break;
- case MIGRATEPAGE_SUCCESS:
- goto out;
- default:
- rc = -EIO;
- goto out;
- }
- }
-out:
- return rc;
-}
-
#ifdef CONFIG_NUMA
/*
* Move a list of individual pages
@@ -1108,7 +1102,11 @@ static struct page *new_page_node(struct page *p, unsigned long private,
*result = &pm->status;
- return alloc_pages_exact_node(pm->node,
+ if (PageHuge(p))
+ return alloc_huge_page_node(page_hstate(compound_head(p)),
+ pm->node);
+ else
+ return alloc_pages_exact_node(pm->node,
GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
@@ -1168,6 +1166,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
!migrate_all)
goto put_and_set;
+ if (PageHuge(page)) {
+ isolate_huge_page(page, &pagelist);
+ goto put_and_set;
+ }
+
err = isolate_lru_page(page);
if (!err) {
list_add_tail(&page->lru, &pagelist);
@@ -1190,7 +1193,7 @@ set_status:
err = migrate_pages(&pagelist, new_page_node,
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
if (err)
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
}
up_read(&mm->mmap_sem);
@@ -1468,7 +1471,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable)
+ if (!zone_reclaimable(zone))
continue;
/* Avoid waking kswapd by allocating pages_to_migrate pages. */
diff --git a/mm/mlock.c b/mm/mlock.c
index 79b7cf7d1bc..d6380266324 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -11,6 +11,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
@@ -18,6 +19,8 @@
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
#include "internal.h"
@@ -87,6 +90,47 @@ void mlock_vma_page(struct page *page)
}
}
+/*
+ * Finish munlock after successful page isolation
+ *
+ * Page must be locked. This is a wrapper for try_to_munlock()
+ * and putback_lru_page() with munlock accounting.
+ */
+static void __munlock_isolated_page(struct page *page)
+{
+ int ret = SWAP_AGAIN;
+
+ /*
+ * Optimization: if the page was mapped just once, that's our mapping
+ * and we don't need to check all the other vmas.
+ */
+ if (page_mapcount(page) > 1)
+ ret = try_to_munlock(page);
+
+ /* Did try_to_munlock() succeed or punt? */
+ if (ret != SWAP_MLOCK)
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+ putback_lru_page(page);
+}
+
+/*
+ * Accounting for page isolation fail during munlock
+ *
+ * Performs accounting when page isolation fails in munlock. There is nothing
+ * else to do because it means some other task has already removed the page
+ * from the LRU. putback_lru_page() will take care of removing the page from
+ * the unevictable list, if necessary. vmscan [page_referenced()] will move
+ * the page back to the unevictable list if some other vma has it mlocked.
+ */
+static void __munlock_isolation_failed(struct page *page)
+{
+ if (PageUnevictable(page))
+ count_vm_event(UNEVICTABLE_PGSTRANDED);
+ else
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+}
+
/**
* munlock_vma_page - munlock a vma page
* @page - page to be unlocked
@@ -112,37 +156,10 @@ unsigned int munlock_vma_page(struct page *page)
unsigned int nr_pages = hpage_nr_pages(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
page_mask = nr_pages - 1;
- if (!isolate_lru_page(page)) {
- int ret = SWAP_AGAIN;
-
- /*
- * Optimization: if the page was mapped just once,
- * that's our mapping and we don't need to check all the
- * other vmas.
- */
- if (page_mapcount(page) > 1)
- ret = try_to_munlock(page);
- /*
- * did try_to_unlock() succeed or punt?
- */
- if (ret != SWAP_MLOCK)
- count_vm_event(UNEVICTABLE_PGMUNLOCKED);
-
- putback_lru_page(page);
- } else {
- /*
- * Some other task has removed the page from the LRU.
- * putback_lru_page() will take care of removing the
- * page from the unevictable list, if necessary.
- * vmscan [page_referenced()] will move the page back
- * to the unevictable list if some other vma has it
- * mlocked.
- */
- if (PageUnevictable(page))
- count_vm_event(UNEVICTABLE_PGSTRANDED);
- else
- count_vm_event(UNEVICTABLE_PGMUNLOCKED);
- }
+ if (!isolate_lru_page(page))
+ __munlock_isolated_page(page);
+ else
+ __munlock_isolation_failed(page);
}
return page_mask;
@@ -210,6 +227,191 @@ static int __mlock_posix_error_return(long retval)
}
/*
+ * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
+ *
+ * The fast path is available only for evictable pages with a single mapping;
+ * then we can bypass the per-cpu pvec and get better performance.
+ * When mapcount > 1 we need try_to_munlock(), which can fail.
+ * When !page_evictable(), we need the full redo logic of putback_lru_page() to
+ * avoid leaving an evictable page on the unevictable list.
+ *
+ * On success, @page is added to @pvec, @pgrescued is incremented
+ * if the page was previously unevictable, and @page is unlocked.
+ */
+static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
+ int *pgrescued)
+{
+ VM_BUG_ON(PageLRU(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ if (page_mapcount(page) <= 1 && page_evictable(page)) {
+ pagevec_add(pvec, page);
+ if (TestClearPageUnevictable(page))
+ (*pgrescued)++;
+ unlock_page(page);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Putback multiple evictable pages to the LRU
+ *
+ * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
+ * the pages might meanwhile have become unevictable but that is OK.
+ */
+static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
+{
+ count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
+ /*
+ * __pagevec_lru_add() calls release_pages(), so we don't need to
+ * call put_page() explicitly
+ */
+ __pagevec_lru_add(pvec);
+ count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+}
+
+/*
+ * Munlock a batch of pages from the same zone
+ *
+ * The work is split into two main phases. The first clears the Mlocked flag
+ * and attempts to isolate the pages, all under a single zone lru lock.
+ * The second phase finishes the munlock only for pages where isolation
+ * succeeded.
+ *
+ * Note that the pagevec may be modified during the process.
+ */
+static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
+{
+ int i;
+ int nr = pagevec_count(pvec);
+ int delta_munlocked = -nr;
+ struct pagevec pvec_putback;
+ int pgrescued = 0;
+
+ /* Phase 1: page isolation */
+ spin_lock_irq(&zone->lru_lock);
+ for (i = 0; i < nr; i++) {
+ struct page *page = pvec->pages[i];
+
+ if (TestClearPageMlocked(page)) {
+ struct lruvec *lruvec;
+ int lru;
+
+ if (PageLRU(page)) {
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+ lru = page_lru(page);
+ /*
+ * We already have pin from follow_page_mask()
+ * so we can spare the get_page() here.
+ */
+ ClearPageLRU(page);
+ del_page_from_lru_list(page, lruvec, lru);
+ } else {
+ __munlock_isolation_failed(page);
+ goto skip_munlock;
+ }
+
+ } else {
+skip_munlock:
+ /*
+ * We won't be munlocking this page in the next phase
+ * but we still need to release the follow_page_mask()
+ * pin.
+ */
+ pvec->pages[i] = NULL;
+ put_page(page);
+ delta_munlocked++;
+ }
+ }
+ __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
+ spin_unlock_irq(&zone->lru_lock);
+
+ /* Phase 2: page munlock */
+ pagevec_init(&pvec_putback, 0);
+ for (i = 0; i < nr; i++) {
+ struct page *page = pvec->pages[i];
+
+ if (page) {
+ lock_page(page);
+ if (!__putback_lru_fast_prepare(page, &pvec_putback,
+ &pgrescued)) {
+ /*
+ * Slow path. We don't want to lose the last
+ * pin before unlock_page()
+ */
+ get_page(page); /* for putback_lru_page() */
+ __munlock_isolated_page(page);
+ unlock_page(page);
+ put_page(page); /* from follow_page_mask() */
+ }
+ }
+ }
+
+ /*
+ * Phase 3: page putback for pages that qualified for the fast path
+ * This will also call put_page() to return pin from follow_page_mask()
+ */
+ if (pagevec_count(&pvec_putback))
+ __putback_lru_fast(&pvec_putback, pgrescued);
+}
+
+/*
+ * Fill up pagevec for __munlock_pagevec using pte walk
+ *
+ * The function expects that the struct page corresponding to @start address is
+ * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
+ *
+ * The rest of @pvec is filled by subsequent pages within the same pmd and same
+ * zone, as long as the pte's are present and vm_normal_page() succeeds. These
+ * pages also get pinned.
+ *
+ * Returns the address of the next page that should be scanned. This equals
+ * @start + PAGE_SIZE when no page could be added by the pte walk.
+ */
+static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
+ struct vm_area_struct *vma, int zoneid, unsigned long start,
+ unsigned long end)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ /*
+ * Initialize pte walk starting at the already pinned page where we
+ * are sure that there is a pte.
+ */
+ pte = get_locked_pte(vma->vm_mm, start, &ptl);
+ end = min(end, pmd_addr_end(start, end));
+
+ /* The page next to the pinned page is the first we will try to get */
+ start += PAGE_SIZE;
+ while (start < end) {
+ struct page *page = NULL;
+ pte++;
+ if (pte_present(*pte))
+ page = vm_normal_page(vma, start, *pte);
+ /*
+ * Break if the page could not be obtained or if the page's
+ * node+zone does not match
+ */
+ if (!page || page_zone_id(page) != zoneid)
+ break;
+
+ get_page(page);
+ /*
+ * Increase the address that will be returned *before* the eventual
+ * break caused by the pvec filling up when the page is added
+ */
+ start += PAGE_SIZE;
+ if (pagevec_add(pvec, page) == 0)
+ break;
+ }
+ pte_unmap_unlock(pte, ptl);
+ return start;
+}
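
The `pagevec_add(pvec, page) == 0` test above works because pagevec_add() returns the number of slots still free after the addition, so zero means the pagevec just became full. A hedged sketch of that contract in a generic fill-and-flush loop; next_page() and process_batch() are hypothetical:

	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 0);			/* cold = 0 */
	while ((page = next_page()) != NULL) {
		if (pagevec_add(&pvec, page) == 0) {
			process_batch(&pvec);	/* pagevec is full: flush it */
			pagevec_reinit(&pvec);
		}
	}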
+
+/*
* munlock_vma_pages_range() - munlock all pages in the vma range.
* @vma - vma containing range to be munlock()ed.
* @start - start address in @vma of the range
@@ -233,9 +435,13 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
vma->vm_flags &= ~VM_LOCKED;
while (start < end) {
- struct page *page;
+ struct page *page = NULL;
unsigned int page_mask, page_increm;
+ struct pagevec pvec;
+ struct zone *zone;
+ int zoneid;
+ pagevec_init(&pvec, 0);
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
@@ -244,21 +450,45 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
* has sneaked into the range, we won't oops here: great).
*/
page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
- &page_mask);
+ &page_mask);
+
if (page && !IS_ERR(page)) {
- lock_page(page);
- lru_add_drain();
- /*
- * Any THP page found by follow_page_mask() may have
- * gotten split before reaching munlock_vma_page(),
- * so we need to recompute the page_mask here.
- */
- page_mask = munlock_vma_page(page);
- unlock_page(page);
- put_page(page);
+ if (PageTransHuge(page)) {
+ lock_page(page);
+ /*
+ * Any THP page found by follow_page_mask() may
+ * have gotten split before reaching
+ * munlock_vma_page(), so we need to recompute
+ * the page_mask here.
+ */
+ page_mask = munlock_vma_page(page);
+ unlock_page(page);
+ put_page(page); /* follow_page_mask() */
+ } else {
+ /*
+ * Non-huge pages are handled in batches via
+ * pagevec. The pin from follow_page_mask()
+ * prevents them from collapsing by THP.
+ */
+ pagevec_add(&pvec, page);
+ zone = page_zone(page);
+ zoneid = page_zone_id(page);
+
+ /*
+ * Try to fill the rest of pagevec using fast
+ * pte walk. This will also update start to
+ * the next page to process. Then munlock the
+ * pagevec.
+ */
+ start = __munlock_pagevec_fill(&pvec, vma,
+ zoneid, start, end);
+ __munlock_pagevec(&pvec, zone);
+ goto next;
+ }
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
start += page_increm * PAGE_SIZE;
+next:
cond_resched();
}
}
diff --git a/mm/mmap.c b/mm/mmap.c
index f9c97d10b87..9d548512ff8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1202,7 +1202,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long *populate)
{
struct mm_struct * mm = current->mm;
- struct inode *inode;
vm_flags_t vm_flags;
*populate = 0;
@@ -1265,9 +1264,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
return -EAGAIN;
}
- inode = file ? file_inode(file) : NULL;
-
if (file) {
+ struct inode *inode = file_inode(file);
+
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
@@ -1302,6 +1301,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (!file->f_op || !file->f_op->mmap)
return -ENODEV;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ return -EINVAL;
break;
default:
@@ -1310,6 +1311,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ return -EINVAL;
/*
* Ignore pgoff.
*/
@@ -1476,11 +1479,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- int correct_wcount = 0;
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
- struct inode *inode = file ? file_inode(file) : NULL;
/* Check against address space limit. */
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
@@ -1544,16 +1545,11 @@ munmap_back:
vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
-
if (file) {
- if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
- goto free_vma;
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
- correct_wcount = 1;
}
vma->vm_file = get_file(file);
error = file->f_op->mmap(file, vma);
@@ -1570,11 +1566,8 @@ munmap_back:
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
- pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
- if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
- goto free_vma;
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
@@ -1596,11 +1589,10 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
- file = vma->vm_file;
-
/* Once vma denies write, undo our temporary denial count */
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
+ if (vm_flags & VM_DENYWRITE)
+ allow_write_access(file);
+ file = vma->vm_file;
out:
perf_event_mmap(vma);
@@ -1616,11 +1608,20 @@ out:
if (file)
uprobe_mmap(vma);
+ /*
+ * New (or expanded) vma always get soft dirty status.
+ * Otherwise user-space soft-dirty page tracker won't
+ * be able to distinguish situation when vma area unmapped,
+ * then new mapped in-place (which must be aimed as
+ * a completely new data area).
+ */
+ vma->vm_flags |= VM_SOFTDIRTY;
+
return addr;
unmap_and_free_vma:
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
+ if (vm_flags & VM_DENYWRITE)
+ allow_write_access(file);
vma->vm_file = NULL;
fput(file);
@@ -2380,7 +2381,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
- struct mempolicy *pol;
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -2404,12 +2404,9 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
- pol = mpol_dup(vma_policy(vma));
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
+ err = vma_dup_policy(vma, new);
+ if (err)
goto out_free_vma;
- }
- vma_set_policy(new, pol);
if (anon_vma_clone(new, vma))
goto out_free_mpol;
@@ -2437,7 +2434,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
fput(new->vm_file);
unlink_anon_vmas(new);
out_free_mpol:
- mpol_put(pol);
+ mpol_put(vma_policy(new));
out_free_vma:
kmem_cache_free(vm_area_cachep, new);
out_err:
@@ -2663,6 +2660,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
+ vma->vm_flags |= VM_SOFTDIRTY;
return addr;
}
@@ -2780,7 +2778,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
- struct mempolicy *pol;
bool faulted_in_anon_vma = true;
/*
@@ -2825,10 +2822,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
- pol = mpol_dup(vma_policy(vma));
- if (IS_ERR(pol))
+ if (vma_dup_policy(vma, new_vma))
goto out_free_vma;
- vma_set_policy(new_vma, pol);
INIT_LIST_HEAD(&new_vma->anon_vma_chain);
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
@@ -2843,7 +2838,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return new_vma;
out_free_mempol:
- mpol_put(pol);
+ mpol_put(vma_policy(new_vma));
out_free_vma:
kmem_cache_free(vm_area_cachep, new_vma);
return NULL;
@@ -2930,7 +2925,7 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
- vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = &special_mapping_vmops;
diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb66f3..91b13d6a16d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,6 +25,7 @@
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
#include "internal.h"
@@ -62,8 +63,10 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
return NULL;
pmd = pmd_alloc(mm, pud, addr);
- if (!pmd)
+ if (!pmd) {
+ pud_free(mm, pud);
return NULL;
+ }
VM_BUG_ON(pmd_trans_huge(*pmd));
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3f0c895c71f..6c7b0187be8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,8 +36,11 @@
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
+#include <linux/mm_inline.h>
#include <trace/events/writeback.h>
+#include "internal.h"
+
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
*/
@@ -241,9 +244,6 @@ static unsigned long global_dirtyable_memory(void)
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
- /* Subtract min_free_kbytes */
- x -= min_t(unsigned long, x, min_free_kbytes >> (PAGE_SHIFT - 10));
-
return x + 1; /* Ensure that we never return 0 */
}
@@ -585,6 +585,37 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
}
/*
+ *                           setpoint - dirty 3
+ *        f(dirty) := 1.0 + (----------------)
+ *                           limit - setpoint
+ *
+ * it's a 3rd-order polynomial, subject to
+ *
+ * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
+ * (2) f(setpoint) = 1.0 => the balance point
+ * (3) f(limit)    = 0   => the hard limit
+ * (4) df/dx      <= 0   => negative feedback control
+ * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+ *     => fast response on large errors; small oscillation near setpoint
+ */
+static inline long long pos_ratio_polynom(unsigned long setpoint,
+ unsigned long dirty,
+ unsigned long limit)
+{
+ long long pos_ratio;
+ long x;
+
+ x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ limit - setpoint + 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+ pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+
+ return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
+}
+
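
In floating point the function above is simply f(dirty) = 1.0 + ((setpoint - dirty) / (limit - setpoint + 1))^3, clamped to [0, 2]; the kernel keeps everything in RATELIMIT_CALC_SHIFT fixed point to stay off the FPU. A standalone reference model of the same curve:

	/* f(setpoint) = 1, f(limit) ~ 0, and f(freerun) ~ 2 when
	 * setpoint = (freerun + limit) / 2, matching properties (1)-(3). */
	static double pos_ratio_model(double setpoint, double dirty, double limit)
	{
		double x = (setpoint - dirty) / (limit - setpoint + 1);
		double r = 1.0 + x * x * x;

		return r < 0.0 ? 0.0 : (r > 2.0 ? 2.0 : r);
	}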
+/*
* Dirty position control.
*
* (o) global/bdi setpoints
@@ -682,26 +713,80 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
/*
* global setpoint
*
- * setpoint - dirty 3
- * f(dirty) := 1.0 + (----------------)
- * limit - setpoint
+ * See comment for pos_ratio_polynom().
+ */
+ setpoint = (freerun + limit) / 2;
+ pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);
+
+ /*
+ * The strictlimit feature is a tool preventing untrusted filesystems
+ * from growing a large number of dirty pages before throttling. For
+ * such filesystems balance_dirty_pages always checks bdi counters
+ * against bdi limits, even if the global "nr_dirty" is under "freerun".
+ * This is especially important for fuse which sets bdi->max_ratio to
+ * 1% by default. Without strictlimit feature, fuse writeback may
+ * consume arbitrary amount of RAM because it is accounted in
+ * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
*
- * it's a 3rd order polynomial that subjects to
+ * Here, in bdi_position_ratio(), we calculate pos_ratio based on
+ * two values: bdi_dirty and bdi_thresh. Let's consider an example:
+ * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
+ * limits are set by default to 10% and 20% (background and throttle).
+ * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
+ * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
+ * about ~6K pages (as the average of background and throttle bdi
+ * limits). The 3rd order polynomial will provide positive feedback if
+ * bdi_dirty is under bdi_setpoint and vice versa.
*
- * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
- * (2) f(setpoint) = 1.0 => the balance point
- * (3) f(limit) = 0 => the hard limit
- * (4) df/dx <= 0 => negative feedback control
- * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
- * => fast response on large errors; small oscillation near setpoint
+ * Note that we cannot use global counters in these calculations
+ * because we want to throttle a process writing to a strictlimit BDI
+ * much earlier than the global "freerun" is reached (~23MB vs. ~2.3GB
+ * in the example above).
*/
- setpoint = (freerun + limit) / 2;
- x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
- limit - setpoint + 1);
- pos_ratio = x;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
- pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
- pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+ long long bdi_pos_ratio;
+ unsigned long bdi_bg_thresh;
+
+ if (bdi_dirty < 8)
+ return min_t(long long, pos_ratio * 2,
+ 2 << RATELIMIT_CALC_SHIFT);
+
+ if (bdi_dirty >= bdi_thresh)
+ return 0;
+
+ bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
+ bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
+ bdi_bg_thresh);
+
+ if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
+ return 0;
+
+ bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
+ bdi_thresh);
+
+ /*
+ * Typically, for the strictlimit case, bdi_setpoint << setpoint
+ * and pos_ratio >> bdi_pos_ratio. In other words, the global
+ * state ("dirty") is not the limiting factor and we have to
+ * make the decision based on bdi counters. But there is an
+ * important case when the global pos_ratio should take precedence:
+ * global limits are exceeded (e.g. due to activity on other
+ * BDIs) while the given strictlimit BDI is below its limit.
+ *
+ * "pos_ratio * bdi_pos_ratio" would work for the case above,
+ * but it would look too unnatural for the case of all
+ * activity in the system coming from a single strictlimit BDI
+ * with bdi->max_ratio == 100%.
+ *
+ * Note that min() below somewhat changes the dynamics of the
+ * control system. Normally, pos_ratio value can be well over 3
+ * (when globally we are at freerun and bdi is well below bdi
+ * setpoint). Now the maximum pos_ratio in the same situation
+ * is 2. We might want to tweak this if we observe the control
+ * system is too slow to adapt.
+ */
+ return min(pos_ratio, bdi_pos_ratio);
+ }
/*
* We have computed basic pos_ratio above based on global situation. If
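To make the 16GB example from the comment above concrete, a short hedged calculation (hypothetical machine: 4KB pages, bdi->max_ratio = 1%, default global limits of 10% and 20%) reproduces the quoted ~8K, ~4K and ~6K page figures:

#include <stdio.h>

int main(void)
{
	unsigned long long total = (16ULL << 30) >> 12; /* pages in 16GB */
	unsigned long long thresh = total * 20 / 100;   /* global throttle */
	unsigned long long bg = total * 10 / 100;       /* global background */
	unsigned long long bdi_thresh = thresh / 100;   /* max_ratio = 1% */
	unsigned long long bdi_bg = bdi_thresh * bg / thresh;
	unsigned long long bdi_setpoint = (bdi_thresh + bdi_bg) / 2;

	/* prints 8388, 4194 and 6291 pages */
	printf("bdi_thresh=%llu bdi_bg=%llu bdi_setpoint=%llu\n",
	       bdi_thresh, bdi_bg, bdi_setpoint);
	return 0;
}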
@@ -994,6 +1079,27 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* keep that period small to reduce time lags).
*/
step = 0;
+
+ /*
+ * For the strictlimit case, the calculations above were based on bdi
+ * counters and limits (starting from pos_ratio = bdi_position_ratio()
+ * and up to balanced_dirty_ratelimit = task_ratelimit * write_bw /
+ * dirty_rate). Hence, to calculate "step" properly, we have to use
+ * bdi_dirty as "dirty" and bdi_setpoint as "setpoint".
+ *
+ * We forcibly ramp up dirty_ratelimit if bdi_dirty is low because
+ * bdi_thresh may be close to zero due to inactivity of the backing
+ * device (see the implementation of bdi_dirty_limit()).
+ */
+ if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+ dirty = bdi_dirty;
+ if (bdi_dirty < 8)
+ setpoint = bdi_dirty + 1;
+ else
+ setpoint = (bdi_thresh +
+ bdi_dirty_limit(bdi, bg_thresh)) / 2;
+ }
+
if (dirty < setpoint) {
x = min(bdi->balanced_dirty_ratelimit,
min(balanced_dirty_ratelimit, task_ratelimit));
@@ -1198,6 +1304,56 @@ static long bdi_min_pause(struct backing_dev_info *bdi,
return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
+static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
+ unsigned long dirty_thresh,
+ unsigned long background_thresh,
+ unsigned long *bdi_dirty,
+ unsigned long *bdi_thresh,
+ unsigned long *bdi_bg_thresh)
+{
+ unsigned long bdi_reclaimable;
+
+ /*
+ * bdi_thresh is not treated as some limiting factor as
+ * dirty_thresh, due to reasons
+ * - in JBOD setup, bdi_thresh can fluctuate a lot
+ * - in a system with HDD and USB key, the USB key may somehow
+ * go into state (bdi_dirty >> bdi_thresh) either because
+ * bdi_dirty starts high, or because bdi_thresh drops low.
+ * In this case we don't want to hard throttle the USB key
+ * dirtiers for 100 seconds until bdi_dirty drops under
+ * bdi_thresh. Instead the auxiliary bdi control line in
+ * bdi_position_ratio() will let the dirtier task progress
+ * at some rate <= (write_bw / 2) for bringing down bdi_dirty.
+ */
+ *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+
+ if (bdi_bg_thresh)
+ *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
+ background_thresh,
+ dirty_thresh);
+
+ /*
+ * In order to avoid the stacked BDI deadlock we need
+ * to ensure we accurately count the 'dirty' pages when
+ * the threshold is low.
+ *
+ * Otherwise it would be possible to get thresh+n pages
+ * reported dirty, even though there are thresh-m pages
+ * actually dirty; with m+n sitting in the percpu
+ * deltas.
+ */
+ if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
+ bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+ *bdi_dirty = bdi_reclaimable +
+ bdi_stat_sum(bdi, BDI_WRITEBACK);
+ } else {
+ bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+ *bdi_dirty = bdi_reclaimable +
+ bdi_stat(bdi, BDI_WRITEBACK);
+ }
+}
+
/*
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
@@ -1209,13 +1365,9 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long pages_dirtied)
{
unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */
- unsigned long bdi_reclaimable;
unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */
- unsigned long bdi_dirty;
- unsigned long freerun;
unsigned long background_thresh;
unsigned long dirty_thresh;
- unsigned long bdi_thresh;
long period;
long pause;
long max_pause;
@@ -1226,10 +1378,16 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long dirty_ratelimit;
unsigned long pos_ratio;
struct backing_dev_info *bdi = mapping->backing_dev_info;
+ bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
unsigned long start_time = jiffies;
for (;;) {
unsigned long now = jiffies;
+ unsigned long uninitialized_var(bdi_thresh);
+ unsigned long thresh;
+ unsigned long uninitialized_var(bdi_dirty);
+ unsigned long dirty;
+ unsigned long bg_thresh;
/*
* Unstable writes are a feature of certain networked
@@ -1243,61 +1401,44 @@ static void balance_dirty_pages(struct address_space *mapping,
global_dirty_limits(&background_thresh, &dirty_thresh);
+ if (unlikely(strictlimit)) {
+ bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+ &bdi_dirty, &bdi_thresh, &bg_thresh);
+
+ dirty = bdi_dirty;
+ thresh = bdi_thresh;
+ } else {
+ dirty = nr_dirty;
+ thresh = dirty_thresh;
+ bg_thresh = background_thresh;
+ }
+
/*
* Throttle it only when the background writeback cannot
* catch-up. This avoids (excessively) small writeouts
- * when the bdi limits are ramping up.
+ * when the bdi limits are ramping up in case of !strictlimit.
+ *
+ * In the strictlimit case, make the decision based on the bdi
+ * counters and limits. Small writeouts when the bdi limits are
+ * ramping up are the price we consciously pay for strictlimit-ing.
*/
- freerun = dirty_freerun_ceiling(dirty_thresh,
- background_thresh);
- if (nr_dirty <= freerun) {
+ if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
current->dirty_paused_when = now;
current->nr_dirtied = 0;
current->nr_dirtied_pause =
- dirty_poll_interval(nr_dirty, dirty_thresh);
+ dirty_poll_interval(dirty, thresh);
break;
}
if (unlikely(!writeback_in_progress(bdi)))
bdi_start_background_writeback(bdi);
- /*
- * bdi_thresh is not treated as some limiting factor as
- * dirty_thresh, due to reasons
- * - in JBOD setup, bdi_thresh can fluctuate a lot
- * - in a system with HDD and USB key, the USB key may somehow
- * go into state (bdi_dirty >> bdi_thresh) either because
- * bdi_dirty starts high, or because bdi_thresh drops low.
- * In this case we don't want to hard throttle the USB key
- * dirtiers for 100 seconds until bdi_dirty drops under
- * bdi_thresh. Instead the auxiliary bdi control line in
- * bdi_position_ratio() will let the dirtier task progress
- * at some rate <= (write_bw / 2) for bringing down bdi_dirty.
- */
- bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-
- /*
- * In order to avoid the stacked BDI deadlock we need
- * to ensure we accurately count the 'dirty' pages when
- * the threshold is low.
- *
- * Otherwise it would be possible to get thresh+n pages
- * reported dirty, even though there are thresh-m pages
- * actually dirty; with m+n sitting in the percpu
- * deltas.
- */
- if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
- bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
- bdi_dirty = bdi_reclaimable +
- bdi_stat_sum(bdi, BDI_WRITEBACK);
- } else {
- bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
- bdi_dirty = bdi_reclaimable +
- bdi_stat(bdi, BDI_WRITEBACK);
- }
+ if (!strictlimit)
+ bdi_dirty_limits(bdi, dirty_thresh, background_thresh,
+ &bdi_dirty, &bdi_thresh, NULL);
dirty_exceeded = (bdi_dirty > bdi_thresh) &&
- (nr_dirty > dirty_thresh);
+ ((nr_dirty > dirty_thresh) || strictlimit);
if (dirty_exceeded && !bdi->dirty_exceeded)
bdi->dirty_exceeded = 1;
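The rewritten loop boils down to one switch of inputs: under strictlimit the freerun check and the poll interval run on the bdi's own counters, otherwise on the global ones. A hedged, compilable restatement of just that selection (a simplification, not kernel code):

struct dirty_state {
	unsigned long dirty;     /* pages currently dirty */
	unsigned long thresh;    /* throttle threshold */
	unsigned long bg_thresh; /* background threshold */
};

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

/* Returns nonzero when the dirtier may proceed without throttling. */
static int below_freerun(struct dirty_state global,
			 struct dirty_state bdi, int strictlimit)
{
	struct dirty_state s = strictlimit ? bdi : global;

	return s.dirty <= dirty_freerun_ceiling(s.thresh, s.bg_thresh);
}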
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c2b59dbda19..0ee638f76eb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
@@ -488,8 +489,10 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
* (c) a page and its buddy have the same order &&
* (d) a page and its buddy are in the same zone.
*
- * For recording whether a page is in the buddy system, we set ->_mapcount -2.
- * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE.
+ * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
+ * serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
@@ -527,8 +530,9 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* as necessary, plus some accounting needed to play nicely with other
* parts of the VM system.
* At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount -2. Page's
- * order is recorded in page_private(page) field.
+ * free pages of length of (1 << order) and marked with _mapcount
+ * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
+ * field.
* So when we are allocating or freeing one, we can derive the state of the
* other. That is, if we allocate a small block, and both were
* free, the remainder of the region must be split into blocks.
@@ -647,7 +651,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
int to_free = count;
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
while (to_free) {
@@ -696,7 +699,6 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
@@ -721,7 +723,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
return false;
if (!PageHighMem(page)) {
- debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+ debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE << order);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
@@ -750,19 +753,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
+ struct page *p = page;
unsigned int loop;
- prefetchw(page);
- for (loop = 0; loop < nr_pages; loop++) {
- struct page *p = &page[loop];
-
- if (loop + 1 < nr_pages)
- prefetchw(p + 1);
+ prefetchw(p);
+ for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+ prefetchw(p + 1);
__ClearPageReserved(p);
set_page_count(p, 0);
}
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
- page_zone(page)->managed_pages += 1 << order;
+ page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
}
@@ -885,7 +888,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
unsigned int current_order;
- struct free_area * area;
+ struct free_area *area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
@@ -1007,14 +1010,60 @@ static void change_pageblock_range(struct page *pageblock_page,
}
}
+/*
+ * If breaking a large block of pages, move all free pages to the preferred
+ * allocation list. If falling back for a reclaimable kernel allocation, be
+ * more aggressive about taking ownership of free pages.
+ *
+ * On the other hand, never change migration type of MIGRATE_CMA pageblocks
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+ * Returns the new migratetype of the pageblock (or the same old migratetype
+ * if it was unchanged).
+ */
+static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ int start_type, int fallback_type)
+{
+ int current_order = page_order(page);
+
+ if (is_migrate_cma(fallback_type))
+ return fallback_type;
+
+ /* Take ownership for orders >= pageblock_order */
+ if (current_order >= pageblock_order) {
+ change_pageblock_range(page, current_order, start_type);
+ return start_type;
+ }
+
+ if (current_order >= pageblock_order / 2 ||
+ start_type == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled) {
+ int pages;
+
+ pages = move_freepages_block(zone, page, start_type);
+
+ /* Claim the whole block if over half of it is free */
+ if (pages >= (1 << (pageblock_order-1)) ||
+ page_group_by_mobility_disabled) {
+
+ set_pageblock_migratetype(page, start_type);
+ return start_type;
+ }
+
+ }
+
+ return fallback_type;
+}
+
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
- struct free_area * area;
+ struct free_area *area;
int current_order;
struct page *page;
- int migratetype, i;
+ int migratetype, new_type, i;
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
@@ -1034,51 +1083,29 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
struct page, lru);
area->nr_free--;
- /*
- * If breaking a large block of pages, move all free
- * pages to the preferred allocation list. If falling
- * back for a reclaimable kernel allocation, be more
- * aggressive about taking ownership of free pages
- *
- * On the other hand, never change migration
- * type of MIGRATE_CMA pageblocks nor move CMA
- * pages on different free lists. We don't
- * want unmovable pages to be allocated from
- * MIGRATE_CMA areas.
- */
- if (!is_migrate_cma(migratetype) &&
- (current_order >= pageblock_order / 2 ||
- start_migratetype == MIGRATE_RECLAIMABLE ||
- page_group_by_mobility_disabled)) {
- int pages;
- pages = move_freepages_block(zone, page,
- start_migratetype);
-
- /* Claim the whole block if over half of it is free */
- if (pages >= (1 << (pageblock_order-1)) ||
- page_group_by_mobility_disabled)
- set_pageblock_migratetype(page,
- start_migratetype);
-
- migratetype = start_migratetype;
- }
+ new_type = try_to_steal_freepages(zone, page,
+ start_migratetype,
+ migratetype);
/* Remove the page from the freelists */
list_del(&page->lru);
rmv_page_order(page);
- /* Take ownership for orders >= pageblock_order */
- if (current_order >= pageblock_order &&
- !is_migrate_cma(migratetype))
- change_pageblock_range(page, current_order,
- start_migratetype);
-
+ /*
+ * Borrow the excess buddy pages as well, irrespective
+ * of whether we stole freepages, or took ownership of
+ * the pageblock or not.
+ *
+ * Exception: When borrowing from MIGRATE_CMA, release
+ * the excess buddy pages to CMA itself.
+ */
expand(zone, page, order, current_order, area,
is_migrate_cma(migratetype)
? migratetype : start_migratetype);
- trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, migratetype);
+ trace_mm_page_alloc_extfrag(page, order,
+ current_order, start_migratetype, migratetype,
+ new_type == start_migratetype);
return page;
}
@@ -1281,7 +1308,7 @@ void mark_free_pages(struct zone *zone)
int order, t;
struct list_head *curr;
- if (!zone->spanned_pages)
+ if (zone_is_empty(zone))
return;
spin_lock_irqsave(&zone->lock, flags);
@@ -1526,6 +1553,7 @@ again:
get_pageblock_migratetype(page));
}
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
@@ -1792,6 +1820,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+ return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+}
+
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
@@ -1829,6 +1862,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}
+static bool zone_local(struct zone *local_zone, struct zone *zone)
+{
+ return true;
+}
+
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
return true;
@@ -1860,16 +1898,41 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
- * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask) {
+ unsigned long mark;
+
if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
+ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+ if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+ goto try_this_zone;
+ /*
+ * Distribute pages in proportion to the individual
+ * zone size to ensure fair page aging. The zone a
+ * page was allocated in should have no effect on the
+ * time the page has in memory before being reclaimed.
+ *
+ * When zone_reclaim_mode is enabled, try to stay in
+ * local zones in the fastpath. If that fails, the
+ * slowpath is entered, which will do another pass
+ * starting with the local zones, but ultimately fall
+ * back to remote zones that do not partake in the
+ * fairness round-robin cycle of this zonelist.
+ */
+ if (alloc_flags & ALLOC_WMARK_LOW) {
+ if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+ continue;
+ if (zone_reclaim_mode &&
+ !zone_local(preferred_zone, zone))
+ continue;
+ }
/*
* When allocating a page cache page for writing, we
* want to get it from a zone that is within its dirty
@@ -1900,16 +1963,11 @@ zonelist_scan:
(gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
goto this_zone_full;
- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
- if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
- unsigned long mark;
+ mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+ if (!zone_watermark_ok(zone, order, mark,
+ classzone_idx, alloc_flags)) {
int ret;
- mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
- if (zone_watermark_ok(zone, order, mark,
- classzone_idx, alloc_flags))
- goto try_this_zone;
-
if (IS_ENABLED(CONFIG_NUMA) &&
!did_zlc_setup && nr_online_nodes > 1) {
/*
@@ -2321,16 +2379,30 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
return page;
}
-static inline
-void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
- enum zone_type high_zoneidx,
- enum zone_type classzone_idx)
+static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist,
+ enum zone_type high_zoneidx,
+ struct zone *preferred_zone)
{
struct zoneref *z;
struct zone *zone;
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
- wakeup_kswapd(zone, order, classzone_idx);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ if (!(gfp_mask & __GFP_NO_KSWAPD))
+ wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+ /*
+ * Only reset the batches of zones that were actually
+ * considered in the fast path; we don't want to
+ * thrash fairness information for zones that are not
+ * actually part of this zonelist's round-robin cycle.
+ */
+ if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+ continue;
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) -
+ low_wmark_pages(zone) -
+ zone_page_state(zone, NR_ALLOC_BATCH));
+ }
}
static inline int
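prepare_slowpath() pairs with the NR_ALLOC_BATCH test added to the fast path: each zone gets an allocation budget of roughly high_wmark - low_wmark pages, the fast path skips zones whose budget has run out, and the slow path re-arms every zone in the local round-robin set. A quick hedged check of the re-arm arithmetic, with hypothetical watermark values:

#include <stdio.h>

int main(void)
{
	long high_wmark = 1500, low_wmark = 1000; /* hypothetical */
	long batch = -42; /* budget overdrawn by racing allocations */
	long delta = high_wmark - low_wmark - batch;

	/* mod_zone_page_state(zone, NR_ALLOC_BATCH, delta) re-arms
	 * the budget to exactly high_wmark - low_wmark, here 500 */
	printf("delta=%ld new_batch=%ld\n", delta, batch + delta);
	return 0;
}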
@@ -2426,9 +2498,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto nopage;
restart:
- if (!(gfp_mask & __GFP_NO_KSWAPD))
- wake_all_kswapd(order, zonelist, high_zoneidx,
- zone_idx(preferred_zone));
+ prepare_slowpath(gfp_mask, order, zonelist,
+ high_zoneidx, preferred_zone);
/*
* OK, we're below the kswapd watermark and have kicked background
@@ -3095,7 +3166,7 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
- (zone->all_unreclaimable ? "yes" : "no")
+ (!zone_reclaimable(zone) ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3104,7 +3175,7 @@ void show_free_areas(unsigned int filter)
}
for_each_populated_zone(zone) {
- unsigned long nr[MAX_ORDER], flags, order, total = 0;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
unsigned char types[MAX_ORDER];
if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -3416,11 +3487,11 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
static int default_zonelist_order(void)
{
int nid, zone_type;
- unsigned long low_kmem_size,total_size;
+ unsigned long low_kmem_size, total_size;
struct zone *z;
int average_size;
/*
- * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
+ * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
* If they are really small and used heavily, the system can fall
* into OOM very easily.
* This function detect ZONE_DMA/DMA32 size and configures zone order.
@@ -3452,9 +3523,9 @@ static int default_zonelist_order(void)
return ZONELIST_ORDER_NODE;
/*
* look into each node's config.
- * If there is a node whose DMA/DMA32 memory is very big area on
- * local memory, NODE_ORDER may be suitable.
- */
+ * If there is a node whose DMA/DMA32 memory is very big area on
+ * local memory, NODE_ORDER may be suitable.
+ */
average_size = total_size /
(nodes_weight(node_states[N_MEMORY]) + 1);
for_each_online_node(nid) {
@@ -4180,7 +4251,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
if (!zone->wait_table)
return -ENOMEM;
- for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
+ for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
return 0;
@@ -4237,7 +4308,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
- int i, nid;
+ int nid;
/*
* NOTE: The following SMP-unsafe globals are only used early in boot
* when the kernel is running single-threaded.
@@ -4248,15 +4319,14 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
if (last_start_pfn <= pfn && pfn < last_end_pfn)
return last_nid;
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- if (start_pfn <= pfn && pfn < end_pfn) {
- last_start_pfn = start_pfn;
- last_end_pfn = end_pfn;
- last_nid = nid;
- return nid;
- }
- /* This is a memory hole */
- return -1;
+ nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+ if (nid != -1) {
+ last_start_pfn = start_pfn;
+ last_end_pfn = end_pfn;
+ last_nid = nid;
+ }
+
+ return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
@@ -4586,7 +4656,7 @@ static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
{
unsigned int order;
@@ -4614,7 +4684,7 @@ void __init set_pageblock_order(void)
* include/linux/pageblock-flags.h for the values of pageblock_order based on
* the kernel config
*/
-void __init set_pageblock_order(void)
+void __paginginit set_pageblock_order(void)
{
}
@@ -4728,8 +4798,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
spin_lock_init(&zone->lru_lock);
zone_seqlock_init(zone);
zone->zone_pgdat = pgdat;
-
zone_pcp_init(zone);
+
+ /* For bootup, initialized properly in watermark setup */
+ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
+
lruvec_init(&zone->lruvec);
if (!size)
continue;
@@ -4930,7 +5003,7 @@ static unsigned long __init early_calculate_totalpages(void)
if (pages)
node_set_state(nid, N_MEMORY);
}
- return totalpages;
+ return totalpages;
}
/*
@@ -5047,7 +5120,7 @@ restart:
/*
* Some kernelcore has been met, update counts and
* break if the kernelcore for this node has been
- * satisified
+ * satisfied
*/
required_kernelcore -= min(required_kernelcore,
size_pages);
@@ -5061,7 +5134,7 @@ restart:
* If there is still required_kernelcore, we do another pass with one
* less node in the count. This will push zone_movable_pfn[nid] further
* along on the nodes that still have memory until kernelcore is
- * satisified
+ * satisfied
*/
usable_nodes--;
if (usable_nodes && required_kernelcore > usable_nodes)
@@ -5286,8 +5359,10 @@ void __init mem_init_print_info(const char *str)
* 3) .rodata.* may be embedded into .text or .data sections.
*/
#define adj_init_size(start, end, size, pos, adj) \
- if (start <= pos && pos < end && size > adj) \
- size -= adj;
+ do { \
+ if (start <= pos && pos < end && size > adj) \
+ size -= adj; \
+ } while (0)
adj_init_size(__init_begin, __init_end, init_data_size,
_sinittext, init_code_size);
@@ -5361,7 +5436,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
* This is only okay since the processor is dead and cannot
* race with what we are doing.
*/
- refresh_cpu_vm_stats(cpu);
+ cpu_vm_stats_fold(cpu);
}
return NOTIFY_OK;
}
@@ -5498,6 +5573,11 @@ static void __setup_per_zone_wmarks(void)
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) -
+ low_wmark_pages(zone) -
+ zone_page_state(zone, NR_ALLOC_BATCH));
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5570,7 +5650,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
* we want it large (64MB max). But it is not linear, because network
* bandwidth does not increase linearly with machine size. We use
*
- * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
* min_free_kbytes = sqrt(lowmem_kbytes * 16)
*
* which yields
@@ -5614,11 +5694,11 @@ int __meminit init_per_zone_wmark_min(void)
module_init(init_per_zone_wmark_min)
/*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
* that we can call two helper functions whenever min_free_kbytes
* changes.
*/
-int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
@@ -5682,8 +5762,8 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
/*
* percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
- * can have before it gets flushed back to buddy allocator.
+ * cpu. It is the fraction of total pages in each zone that a hot per cpu
+ * pagelist can have before it gets flushed back to buddy allocator.
*/
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
@@ -5745,9 +5825,10 @@ void *__init alloc_large_system_hash(const char *tablename,
if (!numentries) {
/* round applicable memory size up to nearest megabyte */
numentries = nr_kernel_pages;
- numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
- numentries >>= 20 - PAGE_SHIFT;
- numentries <<= 20 - PAGE_SHIFT;
+
+ /* It isn't necessary when PAGE_SIZE >= 1MB */
+ if (PAGE_SHIFT < 20)
+ numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
/* limit to 1 bucket per 2^scale bytes of low memory */
if (scale > PAGE_SHIFT)
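The round_up() form is equivalent to the old add-then-shift sequence whenever PAGE_SHIFT < 20; with 4KB pages, (1<<20)/PAGE_SIZE is 256 pages per megabyte. A hedged equivalence check:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4KB pages */

/* generic round-up; the kernel's round_up() wants a power of two */
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned long n = 1000;
	unsigned long old = ((n + (1UL << (20 - PAGE_SHIFT)) - 1)
			     >> (20 - PAGE_SHIFT)) << (20 - PAGE_SHIFT);
	unsigned long new = ROUND_UP(n, (1UL << 20) >> PAGE_SHIFT);

	printf("old=%lu new=%lu\n", old, new); /* both print 1024 */
	return 0;
}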
@@ -5900,7 +5981,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
* This function checks whether pageblock includes unmovable pages or not.
* If @count is not zero, it is okay to include less @count unmovable pages
*
- * PageLRU check wihtout isolation or lru_lock could race so that
+ * PageLRU check without isolation or lru_lock could race so that
* MIGRATE_MOVABLE block might include unmovable pages. It means you can't
* expect this function should be exact.
*/
@@ -5928,6 +6009,17 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
continue;
page = pfn_to_page(check);
+
+ /*
+ * Hugepages are not in LRU lists, but they're movable.
+ * We need not scan over tail pages bacause we don't
+ * handle each tail page individually in migration.
+ */
+ if (PageHuge(page)) {
+ iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+ continue;
+ }
+
/*
* We can't use page_count without pin a page
* because another CPU can free compound page.
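The tail-page skip works because hugepages are naturally aligned to their compound order, so rounding iter + 1 up to the compound size lands one page past the hugepage; subtracting one compensates for the loop's own increment. A hedged check for an order-9 hugepage (2MB with 4KB pages):

#include <stdio.h>

/* power-of-two round-up, matching the kernel's round_up() */
#define ROUND_UP_P2(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long iter = 512; /* offset of the head page we hit */
	unsigned long order = 9;  /* hypothetical compound_order() */
	unsigned long next = ROUND_UP_P2(iter + 1, 1UL << order) - 1;

	/* prints 1023; the loop's iter++ resumes at 1024 */
	printf("iter set to %lu\n", next);
	return 0;
}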
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10ffb98..d1473b2e948 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
+#include <linux/hugetlb.h>
#include "internal.h"
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+ /*
+ * TODO: allocate a destination hugepage from the nearest neighbor node,
+ * in accordance with the memory policy of the user process if possible.
+ * For now, as a simple workaround, we use the next node for destination.
+ */
+ if (PageHuge(page)) {
+ nodemask_t src = nodemask_of_node(page_to_nid(page));
+ nodemask_t dst;
+ nodes_complement(dst, src);
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ next_node(page_to_nid(page), dst));
+ }
+
if (PageHighMem(page))
gfp_mask |= __GFP_HIGHMEM;
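The destination choice builds the complement of the source node's mask and asks for the next node after the source within it. A hedged userspace model with plain bitmasks (the kernel's nodemask_t works on arbitrary-size masks and its next_node() does not wrap; this simplified variant does):

#include <stdio.h>

#define MAX_NODES 8

static int next_node_wrap(int node, unsigned int mask)
{
	for (int i = 1; i <= MAX_NODES; i++) {
		int n = (node + i) % MAX_NODES;

		if (mask & (1u << n))
			return n;
	}
	return -1; /* no eligible node */
}

int main(void)
{
	int src = 3;
	unsigned int online = 0x0f;               /* nodes 0-3 */
	unsigned int dst = online & ~(1u << src); /* nodes_complement */

	printf("destination node = %d\n", next_node_wrap(src, dst));
	return 0;
}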
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index e1a6e4fab01..3929a40bd6c 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -10,6 +10,30 @@
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
+/*
+ * If a p?d_bad entry is found while walking page tables, report
+ * the error, before resetting entry to p?d_none. Usually (but
+ * very seldom) called out from the p?d_none_or_clear_bad macros.
+ */
+
+void pgd_clear_bad(pgd_t *pgd)
+{
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+}
+
+void pud_clear_bad(pud_t *pud)
+{
+ pud_ERROR(*pud);
+ pud_clear(pud);
+}
+
+void pmd_clear_bad(pmd_t *pmd)
+{
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+}
+
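These helpers exist to be called, rarely, from the p?d_none_or_clear_bad() macros used by page-table walkers: report a corrupt entry once, then clear it so the walk can treat it as empty. A hedged userspace model of that pattern (toy pmd_t, not the kernel macros themselves):

#include <stdio.h>

typedef struct { unsigned long v; } pmd_t;

#define PMD_BAD 0x2UL /* toy "malformed entry" bit */

static int pmd_none(pmd_t p) { return p.v == 0; }
static int pmd_bad(pmd_t p)  { return (p.v & PMD_BAD) != 0; }

static int pmd_none_or_clear_bad(pmd_t *p)
{
	if (pmd_none(*p))
		return 1;
	if (pmd_bad(*p)) {
		fprintf(stderr, "bad pmd %#lx\n", p->v); /* pmd_ERROR() */
		p->v = 0;                                /* pmd_clear() */
		return 1;
	}
	return 0;
}

int main(void)
{
	pmd_t table[] = { { 0 }, { 0x1000 }, { 0x1000 | PMD_BAD } };

	for (int i = 0; i < 3; i++)
		if (!pmd_none_or_clear_bad(&table[i]))
			printf("walking entry %d\n", i);
	return 0;
}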
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Only sets the access flags (dirty, accessed), as well as write
diff --git a/mm/readahead.c b/mm/readahead.c
index 829a77c6283..e4ed0414978 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -371,10 +371,10 @@ static int try_context_readahead(struct address_space *mapping,
size = count_history_pages(mapping, ra, offset, max);
/*
- * no history pages:
+ * not enough history pages:
* it could be a random read
*/
- if (!size)
+ if (size <= req_size)
return 0;
/*
@@ -385,8 +385,8 @@ static int try_context_readahead(struct address_space *mapping,
size *= 2;
ra->start = offset;
- ra->size = get_init_ra_size(size + req_size, max);
- ra->async_size = ra->size;
+ ra->size = min(size + req_size, max);
+ ra->async_size = 1;
return 1;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 526149846d0..8297623fcae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1205,7 +1205,7 @@ repeat:
gfp & GFP_RECLAIM_MASK);
if (error)
goto decused;
- error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+ error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, NULL);
@@ -2819,6 +2819,10 @@ int __init shmem_init(void)
{
int error;
+ /* If rootfs called this, don't re-init */
+ if (shmem_inode_cachep)
+ return 0;
+
error = bdi_init(&shmem_backing_dev_info);
if (error)
goto out4;
diff --git a/mm/slub.c b/mm/slub.c
index e3ba1f2cf60..51df8272cfa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4420,7 +4420,7 @@ static ssize_t order_store(struct kmem_cache *s,
unsigned long order;
int err;
- err = strict_strtoul(buf, 10, &order);
+ err = kstrtoul(buf, 10, &order);
if (err)
return err;
@@ -4448,7 +4448,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
unsigned long min;
int err;
- err = strict_strtoul(buf, 10, &min);
+ err = kstrtoul(buf, 10, &min);
if (err)
return err;
@@ -4468,7 +4468,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
unsigned long objects;
int err;
- err = strict_strtoul(buf, 10, &objects);
+ err = kstrtoul(buf, 10, &objects);
if (err)
return err;
if (objects && !kmem_cache_has_cpu_partial(s))
@@ -4784,7 +4784,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
unsigned long ratio;
int err;
- err = strict_strtoul(buf, 10, &ratio);
+ err = kstrtoul(buf, 10, &ratio);
if (err)
return err;
diff --git a/mm/sparse.c b/mm/sparse.c
index 308d50331bc..4ac1d7ef548 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -339,13 +339,14 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void *data,
unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long usemap_count, int nodeid)
{
void *usemap;
unsigned long pnum;
+ unsigned long **usemap_map = (unsigned long **)data;
int size = usemap_size();
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
@@ -430,11 +431,12 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void *data,
unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long map_count, int nodeid)
{
+ struct page **map_map = (struct page **)data;
sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
map_count, nodeid);
}
@@ -460,6 +462,55 @@ void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
+/**
+ * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
+ * @alloc_func: function to allocate usemap or memmap for each contiguous chunk
+ * @data: usemap_map for pageblock flags or map_map for vmemmap
+ */
+static void __init alloc_usemap_and_memmap(void (*alloc_func)
+ (void *, unsigned long, unsigned long,
+ unsigned long, int), void *data)
+{
+ unsigned long pnum;
+ unsigned long map_count;
+ int nodeid_begin = 0;
+ unsigned long pnum_begin = 0;
+
+ for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid_begin = sparse_early_nid(ms);
+ pnum_begin = pnum;
+ break;
+ }
+ map_count = 1;
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ struct mem_section *ms;
+ int nodeid;
+
+ if (!present_section_nr(pnum))
+ continue;
+ ms = __nr_to_section(pnum);
+ nodeid = sparse_early_nid(ms);
+ if (nodeid == nodeid_begin) {
+ map_count++;
+ continue;
+ }
+ /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
+ alloc_func(data, pnum_begin, pnum,
+ map_count, nodeid_begin);
+ /* new start, update count etc. */
+ nodeid_begin = nodeid;
+ pnum_begin = pnum;
+ map_count = 1;
+ }
+ /* ok, last chunk */
+ alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
+ map_count, nodeid_begin);
+}
+
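The new walker folds two identical chunking loops into one function taking a callback: group consecutive present sections by node id and call the allocator once per run. A hedged miniature of the same shape (the present-section filtering is omitted for brevity):

#include <stdio.h>

static void for_each_node_run(const int *nid, int n,
			      void (*alloc_func)(void *, int, int, int, int),
			      void *data)
{
	int begin = 0, count = 1;

	for (int i = 1; i < n; i++) {
		if (nid[i] == nid[begin]) {
			count++;
			continue;
		}
		alloc_func(data, begin, i, count, nid[begin]);
		begin = i;
		count = 1;
	}
	alloc_func(data, begin, n, count, nid[begin]); /* last chunk */
}

static void show_run(void *data, int begin, int end, int count, int node)
{
	(void)data;
	printf("node %d: sections [%d,%d), count %d\n",
	       node, begin, end, count);
}

int main(void)
{
	int nid[] = { 0, 0, 0, 1, 1, 2 };

	for_each_node_run(nid, 6, show_run, NULL);
	return 0;
}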
/*
* Allocate the accumulated non-linear sections, allocate a mem_map
* for each and record the physical to section mapping.
@@ -471,11 +522,7 @@ void __init sparse_init(void)
unsigned long *usemap;
unsigned long **usemap_map;
int size;
- int nodeid_begin = 0;
- unsigned long pnum_begin = 0;
- unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
- unsigned long map_count;
int size2;
struct page **map_map;
#endif
@@ -501,82 +548,16 @@ void __init sparse_init(void)
usemap_map = alloc_bootmem(size);
if (!usemap_map)
panic("can not allocate usemap_map\n");
-
- for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
-
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid_begin = sparse_early_nid(ms);
- pnum_begin = pnum;
- break;
- }
- usemap_count = 1;
- for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
- int nodeid;
-
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid = sparse_early_nid(ms);
- if (nodeid == nodeid_begin) {
- usemap_count++;
- continue;
- }
- /* ok, we need to take cake of from pnum_begin to pnum - 1*/
- sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
- usemap_count, nodeid_begin);
- /* new start, update count etc*/
- nodeid_begin = nodeid;
- pnum_begin = pnum;
- usemap_count = 1;
- }
- /* ok, last chunk */
- sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
- usemap_count, nodeid_begin);
+ alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
+ (void *)usemap_map);
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
map_map = alloc_bootmem(size2);
if (!map_map)
panic("can not allocate map_map\n");
-
- for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
-
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid_begin = sparse_early_nid(ms);
- pnum_begin = pnum;
- break;
- }
- map_count = 1;
- for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
- struct mem_section *ms;
- int nodeid;
-
- if (!present_section_nr(pnum))
- continue;
- ms = __nr_to_section(pnum);
- nodeid = sparse_early_nid(ms);
- if (nodeid == nodeid_begin) {
- map_count++;
- continue;
- }
- /* ok, we need to take cake of from pnum_begin to pnum - 1*/
- sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
- map_count, nodeid_begin);
- /* new start, update count etc*/
- nodeid_begin = nodeid;
- pnum_begin = pnum;
- map_count = 1;
- }
- /* ok, last chunk */
- sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
- map_count, nodeid_begin);
+ alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
+ (void *)map_map);
#endif
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
diff --git a/mm/swap.c b/mm/swap.c
index 62b78a6e224..c899502d3e3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
+#include <linux/hugetlb.h>
#include "internal.h"
@@ -81,6 +82,19 @@ static void __put_compound_page(struct page *page)
static void put_compound_page(struct page *page)
{
+ /*
+ * hugetlbfs pages cannot be split from under us. If this is a
+ * hugetlbfs page, check the refcount on the head page and release it if
+ * the refcount becomes zero.
+ */
+ if (PageHuge(page)) {
+ page = compound_head(page);
+ if (put_page_testzero(page))
+ __put_compound_page(page);
+
+ return;
+ }
+
if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
struct page *page_head = compound_trans_head(page);
@@ -184,38 +198,51 @@ bool __get_page_tail(struct page *page)
* proper PT lock that already serializes against
* split_huge_page().
*/
- unsigned long flags;
bool got = false;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head;
- if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ /*
+ * If this is a hugetlbfs page, it cannot be split under us. Simply
+ * increment the refcount for the head page.
+ */
+ if (PageHuge(page)) {
+ page_head = compound_head(page);
+ atomic_inc(&page_head->_count);
+ got = true;
+ } else {
+ unsigned long flags;
+
+ page_head = compound_trans_head(page);
+ if (likely(page != page_head &&
+ get_page_unless_zero(page_head))) {
+
+ /* Ref to put_compound_page() comment. */
+ if (PageSlab(page_head)) {
+ if (likely(PageTail(page))) {
+ __get_page_tail_foll(page, false);
+ return true;
+ } else {
+ put_page(page_head);
+ return false;
+ }
+ }
- /* Ref to put_compound_page() comment. */
- if (PageSlab(page_head)) {
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+ * we obtain the lock. That is ok as long as it
+ * can't be freed from under us.
+ */
+ flags = compound_lock_irqsave(page_head);
+ /* here __split_huge_page_refcount won't run anymore */
if (likely(PageTail(page))) {
__get_page_tail_foll(page, false);
- return true;
- } else {
- put_page(page_head);
- return false;
+ got = true;
}
+ compound_unlock_irqrestore(page_head, flags);
+ if (unlikely(!got))
+ put_page(page_head);
}
-
- /*
- * page_head wasn't a dangling pointer but it
- * may not be a head page anymore by the time
- * we obtain the lock. That is ok as long as it
- * can't be freed from under us.
- */
- flags = compound_lock_irqsave(page_head);
- /* here __split_huge_page_refcount won't run anymore */
- if (likely(PageTail(page))) {
- __get_page_tail_foll(page, false);
- got = true;
- }
- compound_unlock_irqrestore(page_head, flags);
- if (unlikely(!got))
- put_page(page_head);
}
return got;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f24ab0dff55..e6f15f8ca2a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -122,7 +122,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
- error = radix_tree_preload(gfp_mask);
+ error = radix_tree_maybe_preload(gfp_mask);
if (!error) {
error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
@@ -328,7 +328,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* call radix_tree_preload() while we can wait.
*/
- err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+ err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
if (err)
break;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6cf2e60983b..3963fc24fcc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -175,14 +175,296 @@ static void discard_swap_cluster(struct swap_info_struct *si,
}
}
-static int wait_for_discard(void *word)
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
+static inline void cluster_set_flag(struct swap_cluster_info *info,
+ unsigned int flag)
{
- schedule();
- return 0;
+ info->flags = flag;
}
-#define SWAPFILE_CLUSTER 256
-#define LATENCY_LIMIT 256
+static inline unsigned int cluster_count(struct swap_cluster_info *info)
+{
+ return info->data;
+}
+
+static inline void cluster_set_count(struct swap_cluster_info *info,
+ unsigned int c)
+{
+ info->data = c;
+}
+
+static inline void cluster_set_count_flag(struct swap_cluster_info *info,
+ unsigned int c, unsigned int f)
+{
+ info->flags = f;
+ info->data = c;
+}
+
+static inline unsigned int cluster_next(struct swap_cluster_info *info)
+{
+ return info->data;
+}
+
+static inline void cluster_set_next(struct swap_cluster_info *info,
+ unsigned int n)
+{
+ info->data = n;
+}
+
+static inline void cluster_set_next_flag(struct swap_cluster_info *info,
+ unsigned int n, unsigned int f)
+{
+ info->flags = f;
+ info->data = n;
+}
+
+static inline bool cluster_is_free(struct swap_cluster_info *info)
+{
+ return info->flags & CLUSTER_FLAG_FREE;
+}
+
+static inline bool cluster_is_null(struct swap_cluster_info *info)
+{
+ return info->flags & CLUSTER_FLAG_NEXT_NULL;
+}
+
+static inline void cluster_set_null(struct swap_cluster_info *info)
+{
+ info->flags = CLUSTER_FLAG_NEXT_NULL;
+ info->data = 0;
+}
+
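The accessors above treat one small per-cluster record as either a usage counter or a linked-list link, discriminated by its flags; free and to-be-discarded clusters are chained through the data field from the corresponding head/tail anchors. A hedged sketch of the layout they imply (the real field widths live in include/linux/swap.h and are an assumption here):

struct swap_cluster_info {
	unsigned int data:24; /* in-use count, or next cluster index */
	unsigned int flags:8; /* CLUSTER_FLAG_FREE / CLUSTER_FLAG_NEXT_NULL */
};

Because every slot of a free cluster is unused, its count field can be repurposed as the list link without spending any extra memory.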
+/* Add a cluster to the discard list and schedule its discard */
+static void swap_cluster_schedule_discard(struct swap_info_struct *si,
+ unsigned int idx)
+{
+ /*
+ * If scan_swap_map() can't find a free cluster, it will check
+ * si->swap_map directly. To make sure the discarding cluster isn't
+ * taken by scan_swap_map(), mark the swap entries bad (occupied). They
+ * will be cleared after the discard.
+ */
+ memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+ SWAP_MAP_BAD, SWAPFILE_CLUSTER);
+
+ if (cluster_is_null(&si->discard_cluster_head)) {
+ cluster_set_next_flag(&si->discard_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&si->discard_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail = cluster_next(&si->discard_cluster_tail);
+ cluster_set_next(&si->cluster_info[tail], idx);
+ cluster_set_next_flag(&si->discard_cluster_tail,
+ idx, 0);
+ }
+
+ schedule_work(&si->discard_work);
+}
+
+/*
+ * Actually do the discard. After a cluster discard is finished, the cluster
+ * will be added to the free cluster list. The caller should hold si->lock.
+ */
+static void swap_do_scheduled_discard(struct swap_info_struct *si)
+{
+ struct swap_cluster_info *info;
+ unsigned int idx;
+
+ info = si->cluster_info;
+
+ while (!cluster_is_null(&si->discard_cluster_head)) {
+ idx = cluster_next(&si->discard_cluster_head);
+
+ cluster_set_next_flag(&si->discard_cluster_head,
+ cluster_next(&info[idx]), 0);
+ if (cluster_next(&si->discard_cluster_tail) == idx) {
+ cluster_set_null(&si->discard_cluster_head);
+ cluster_set_null(&si->discard_cluster_tail);
+ }
+ spin_unlock(&si->lock);
+
+ discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
+ SWAPFILE_CLUSTER);
+
+ spin_lock(&si->lock);
+ cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&si->free_cluster_head)) {
+ cluster_set_next_flag(&si->free_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&si->free_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail;
+
+ tail = cluster_next(&si->free_cluster_tail);
+ cluster_set_next(&info[tail], idx);
+ cluster_set_next_flag(&si->free_cluster_tail,
+ idx, 0);
+ }
+ memset(si->swap_map + idx * SWAPFILE_CLUSTER,
+ 0, SWAPFILE_CLUSTER);
+ }
+}
+
+static void swap_discard_work(struct work_struct *work)
+{
+ struct swap_info_struct *si;
+
+ si = container_of(work, struct swap_info_struct, discard_work);
+
+ spin_lock(&si->lock);
+ swap_do_scheduled_discard(si);
+ spin_unlock(&si->lock);
+}
+
+/*
+ * The cluster corresponding to page_nr will be used. The cluster will be
+ * removed from the free cluster list and its usage counter will be increased.
+ */
+static void inc_cluster_info_page(struct swap_info_struct *p,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+ unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+ if (!cluster_info)
+ return;
+ if (cluster_is_free(&cluster_info[idx])) {
+ VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
+ cluster_set_next_flag(&p->free_cluster_head,
+ cluster_next(&cluster_info[idx]), 0);
+ if (cluster_next(&p->free_cluster_tail) == idx) {
+ cluster_set_null(&p->free_cluster_tail);
+ cluster_set_null(&p->free_cluster_head);
+ }
+ cluster_set_count_flag(&cluster_info[idx], 0, 0);
+ }
+
+ VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
+ cluster_set_count(&cluster_info[idx],
+ cluster_count(&cluster_info[idx]) + 1);
+}
+
+/*
+ * The usage count of the cluster corresponding to page_nr is decreased by
+ * one. If the usage counter becomes 0, which means no page in the cluster
+ * is in use, we can optionally discard the cluster and add it to the free
+ * cluster list.
+ */
+static void dec_cluster_info_page(struct swap_info_struct *p,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
+{
+ unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+
+ if (!cluster_info)
+ return;
+
+ VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
+ cluster_set_count(&cluster_info[idx],
+ cluster_count(&cluster_info[idx]) - 1);
+
+ if (cluster_count(&cluster_info[idx]) == 0) {
+ /*
+ * If the swap is discardable, prepare to discard the cluster
+ * instead of freeing it immediately. The cluster will be freed
+ * after the discard.
+ */
+ if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
+ (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
+ swap_cluster_schedule_discard(p, idx);
+ return;
+ }
+
+ cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&p->free_cluster_head)) {
+ cluster_set_next_flag(&p->free_cluster_head, idx, 0);
+ cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+ } else {
+ unsigned int tail = cluster_next(&p->free_cluster_tail);
+ cluster_set_next(&cluster_info[tail], idx);
+ cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
+ }
+ }
+}
+
+/*
+ * It's possible for scan_swap_map() to use a free cluster from the middle
+ * of the free cluster list. Avoid such abuse to prevent list corruption.
+ */
+static bool
+scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
+ unsigned long offset)
+{
+ struct percpu_cluster *percpu_cluster;
+ bool conflict;
+
+ offset /= SWAPFILE_CLUSTER;
+ conflict = !cluster_is_null(&si->free_cluster_head) &&
+ offset != cluster_next(&si->free_cluster_head) &&
+ cluster_is_free(&si->cluster_info[offset]);
+
+ if (!conflict)
+ return false;
+
+ percpu_cluster = this_cpu_ptr(si->percpu_cluster);
+ cluster_set_null(&percpu_cluster->index);
+ return true;
+}
+
+/*
+ * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
+ * This might involve allocating a new cluster for the current CPU too.
+ */
+static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
+ unsigned long *offset, unsigned long *scan_base)
+{
+ struct percpu_cluster *cluster;
+ bool found_free;
+ unsigned long tmp;
+
+new_cluster:
+ cluster = this_cpu_ptr(si->percpu_cluster);
+ if (cluster_is_null(&cluster->index)) {
+ if (!cluster_is_null(&si->free_cluster_head)) {
+ cluster->index = si->free_cluster_head;
+ cluster->next = cluster_next(&cluster->index) *
+ SWAPFILE_CLUSTER;
+ } else if (!cluster_is_null(&si->discard_cluster_head)) {
+ /*
+ * we don't have a free cluster but have some clusters being
+ * discarded; do the discard now and reclaim them
+ */
+ swap_do_scheduled_discard(si);
+ *scan_base = *offset = si->cluster_next;
+ goto new_cluster;
+ } else
+ return;
+ }
+
+ found_free = false;
+
+ /*
+ * Other CPUs can use our cluster if they can't find a free one;
+ * check whether there is still a free entry in the cluster.
+ */
+ tmp = cluster->next;
+ while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
+ SWAPFILE_CLUSTER) {
+ if (!si->swap_map[tmp]) {
+ found_free = true;
+ break;
+ }
+ tmp++;
+ }
+ if (!found_free) {
+ cluster_set_null(&cluster->index);
+ goto new_cluster;
+ }
+ cluster->next = tmp + 1;
+ *offset = tmp;
+ *scan_base = tmp;
+}
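The per-CPU fast path hangs off a small handle whose shape the accessor calls imply; a hedged sketch (the real definition lives in include/linux/swap.h and the field names here are taken from the code above):

struct percpu_cluster {
	struct swap_cluster_info index; /* current cluster, or NEXT_NULL */
	unsigned int next;              /* next swap offset to try in it */
};

Allocation then proceeds in three steps: take a cluster off free_cluster_head (or force a scheduled discard to completion and retry), scan next forward for a zero swap_map slot since other CPUs may allocate from the same cluster, and set the handle back to null when the cluster is exhausted.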
static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned char usage)
@@ -191,7 +473,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned long scan_base;
unsigned long last_in_cluster = 0;
int latency_ration = LATENCY_LIMIT;
- int found_free_cluster = 0;
/*
* We try to cluster swap pages by allocating them sequentially
@@ -207,24 +488,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
si->flags += SWP_SCANNING;
scan_base = offset = si->cluster_next;
+ /* SSD algorithm */
+ if (si->cluster_info) {
+ scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+ goto checks;
+ }
+
if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
- if (si->flags & SWP_PAGE_DISCARD) {
- /*
- * Start range check on racing allocations, in case
- * they overlap the cluster we eventually decide on
- * (we scan without swap_lock to allow preemption).
- * It's hardly conceivable that cluster_nr could be
- * wrapped during our scan, but don't depend on it.
- */
- if (si->lowest_alloc)
- goto checks;
- si->lowest_alloc = si->max;
- si->highest_alloc = 0;
- }
+
spin_unlock(&si->lock);
/*
@@ -248,7 +523,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -269,7 +543,6 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- found_free_cluster = 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -281,10 +554,13 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
offset = scan_base;
spin_lock(&si->lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
- si->lowest_alloc = 0;
}
checks:
+ if (si->cluster_info) {
+ while (scan_swap_map_ssd_cluster_conflict(si, offset))
+ scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
+ }
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
@@ -317,62 +593,10 @@ checks:
si->highest_bit = 0;
}
si->swap_map[offset] = usage;
+ inc_cluster_info_page(si, si->cluster_info, offset);
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
- if (si->lowest_alloc) {
- /*
- * Only set when SWP_PAGE_DISCARD, and there's a scan
- * for a free cluster in progress or just completed.
- */
- if (found_free_cluster) {
- /*
- * To optimize wear-levelling, discard the
- * old data of the cluster, taking care not to
- * discard any of its pages that have already
- * been allocated by racing tasks (offset has
- * already stepped over any at the beginning).
- */
- if (offset < si->highest_alloc &&
- si->lowest_alloc <= last_in_cluster)
- last_in_cluster = si->lowest_alloc - 1;
- si->flags |= SWP_DISCARDING;
- spin_unlock(&si->lock);
-
- if (offset < last_in_cluster)
- discard_swap_cluster(si, offset,
- last_in_cluster - offset + 1);
-
- spin_lock(&si->lock);
- si->lowest_alloc = 0;
- si->flags &= ~SWP_DISCARDING;
-
- smp_mb(); /* wake_up_bit advises this */
- wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
-
- } else if (si->flags & SWP_DISCARDING) {
- /*
- * Delay using pages allocated by racing tasks
- * until the whole discard has been issued. We
- * could defer that delay until swap_writepage,
- * but it's easier to keep this self-contained.
- */
- spin_unlock(&si->lock);
- wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
- wait_for_discard, TASK_UNINTERRUPTIBLE);
- spin_lock(&si->lock);
- } else {
- /*
- * Note pages allocated by racing tasks while
- * scan for a free cluster is in progress, so
- * that its final discard can exclude them.
- */
- if (offset < si->lowest_alloc)
- si->lowest_alloc = offset;
- if (offset > si->highest_alloc)
- si->highest_alloc = offset;
- }
- }
return offset;
scan:
@@ -527,16 +751,16 @@ static struct swap_info_struct *swap_info_get(swp_entry_t entry)
return p;
bad_free:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
+ pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
bad_offset:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
+ pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_device:
- printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
+ pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_nofile:
- printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
+ pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
out:
return NULL;
}
@@ -600,6 +824,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
/* free if no reference */
if (!usage) {
+ dec_cluster_info_page(p, p->cluster_info, offset);
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
@@ -1107,7 +1332,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
else
continue;
}
- count = si->swap_map[i];
+ count = ACCESS_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
}
@@ -1127,7 +1352,11 @@ int try_to_unuse(unsigned int type, bool frontswap,
{
struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
- unsigned char *swap_map;
+ volatile unsigned char *swap_map; /* swap_map is accessed without
+ * locking. Mark it as volatile
+ * to prevent the compiler
+ * from doing something odd.
+ */
unsigned char swcount;
struct page *page;
swp_entry_t entry;
@@ -1178,7 +1407,15 @@ int try_to_unuse(unsigned int type, bool frontswap,
* reused since sys_swapoff() already disabled
* allocation from here, or alloc_page() failed.
*/
- if (!*swap_map)
+ swcount = *swap_map;
+ /*
+ * We don't hold the lock here, so the swap entry could be
+ * SWAP_MAP_BAD (when the cluster is discarding).
+ * Instead of failing out, we can just skip the swap
+ * entry because swapoff will wait for the discard to
+ * finish anyway.
+ */
+ if (!swcount || swcount == SWAP_MAP_BAD)
continue;
retval = -ENOMEM;
break;
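
The hunk above snapshots the unlocked byte once (swcount = *swap_map) and tests only the copy, so the free and SWAP_MAP_BAD checks cannot disagree under concurrent updates. A minimal userspace sketch of that snapshot-then-test pattern, with a volatile access standing in for ACCESS_ONCE and an assumed SWAP_MAP_BAD value:

#include <stdio.h>

#define SWAP_MAP_BAD 0x3f	/* assumed value, for illustration only */

/* Read the possibly-concurrently-updated byte exactly once. */
static unsigned char snapshot(volatile const unsigned char *map, unsigned long i)
{
	return map[i];
}

int main(void)
{
	unsigned char map[4] = { 0, 1, SWAP_MAP_BAD, 2 };

	for (unsigned long i = 0; i < 4; i++) {
		unsigned char c = snapshot(map, i);

		if (!c || c == SWAP_MAP_BAD)
			continue;	/* skip free and discarding entries */
		printf("entry %lu in use (count %u)\n", i, c);
	}
	return 0;
}
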
@@ -1524,7 +1761,8 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
}
static void _enable_swap_info(struct swap_info_struct *p, int prio,
- unsigned char *swap_map)
+ unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info)
{
int i, prev;
@@ -1533,6 +1771,7 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
else
p->prio = --least_priority;
p->swap_map = swap_map;
+ p->cluster_info = cluster_info;
p->flags |= SWP_WRITEOK;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
@@ -1553,12 +1792,13 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
static void enable_swap_info(struct swap_info_struct *p, int prio,
unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info,
unsigned long *frontswap_map)
{
frontswap_init(p->type, frontswap_map);
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, prio, swap_map);
+ _enable_swap_info(p, prio, swap_map, cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1567,7 +1807,7 @@ static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, p->prio, p->swap_map);
+ _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -1576,6 +1816,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
+ struct swap_cluster_info *cluster_info;
unsigned long *frontswap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
@@ -1651,6 +1892,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
goto out_dput;
}
+ flush_work(&p->discard_work);
+
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
@@ -1675,6 +1918,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
+ cluster_info = p->cluster_info;
+ p->cluster_info = NULL;
p->flags = 0;
frontswap_map = frontswap_map_get(p);
frontswap_map_set(p, NULL);
@@ -1682,7 +1927,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
frontswap_invalidate_area(type);
mutex_unlock(&swapon_mutex);
+ free_percpu(p->percpu_cluster);
+ p->percpu_cluster = NULL;
vfree(swap_map);
+ vfree(cluster_info);
vfree(frontswap_map);
/* Destroy swap account information */
swap_cgroup_swapoff(type);
@@ -1926,9 +2174,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
int i;
unsigned long maxpages;
unsigned long swapfilepages;
+ unsigned long last_page;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
- printk(KERN_ERR "Unable to find swap-space signature\n");
+ pr_err("Unable to find swap-space signature\n");
return 0;
}
@@ -1942,9 +2191,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
- printk(KERN_WARNING
- "Unable to handle swap header version %d\n",
- swap_header->info.version);
+ pr_warn("Unable to handle swap header version %d\n",
+ swap_header->info.version);
return 0;
}
@@ -1968,8 +2216,14 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
- if (maxpages > swap_header->info.last_page) {
- maxpages = swap_header->info.last_page + 1;
+ last_page = swap_header->info.last_page;
+ if (last_page > maxpages) {
+ pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
+ maxpages << (PAGE_SHIFT - 10),
+ last_page << (PAGE_SHIFT - 10));
+ }
+ if (maxpages > last_page) {
+ maxpages = last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
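
The new warning reports sizes in kibibytes via pages << (PAGE_SHIFT - 10): a page is 2^PAGE_SHIFT bytes and a KiB is 2^10 bytes, so the shift multiplies the page count by KiB-per-page. A one-line check with an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long maxpages = 1UL << 20;	/* 1Mi pages == 4 GiB */

	/* pages * (2^PAGE_SHIFT / 2^10) KiB per page */
	printf("%luk\n", maxpages << (PAGE_SHIFT - 10));	/* 4194304k */
	return 0;
}
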
@@ -1980,8 +2234,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
- printk(KERN_WARNING
- "Swap area shorter than signature indicates\n");
+ pr_warn("Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
@@ -1995,15 +2248,23 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
static int setup_swap_map_and_extents(struct swap_info_struct *p,
union swap_header *swap_header,
unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info,
unsigned long maxpages,
sector_t *span)
{
int i;
unsigned int nr_good_pages;
int nr_extents;
+ unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
nr_good_pages = maxpages - 1; /* omit header page */
+ cluster_set_null(&p->free_cluster_head);
+ cluster_set_null(&p->free_cluster_tail);
+ cluster_set_null(&p->discard_cluster_head);
+ cluster_set_null(&p->discard_cluster_tail);
+
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -2011,11 +2272,25 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
+ /*
+ * Haven't marked the cluster free yet, no list
+ * operation involved
+ */
+ inc_cluster_info_page(p, cluster_info, page_nr);
}
}
+ /* Haven't marked the cluster free yet, no list operation involved */
+ for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+ inc_cluster_info_page(p, cluster_info, i);
+
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
+ /*
+ * Haven't marked the cluster free yet, no list
+ * operation involved
+ */
+ inc_cluster_info_page(p, cluster_info, 0);
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, span);
@@ -2024,10 +2299,34 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
- printk(KERN_WARNING "Empty swap-file\n");
+ pr_warn("Empty swap-file\n");
return -EINVAL;
}
+ if (!cluster_info)
+ return nr_extents;
+
+ for (i = 0; i < nr_clusters; i++) {
+ if (!cluster_count(&cluster_info[idx])) {
+ cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
+ if (cluster_is_null(&p->free_cluster_head)) {
+ cluster_set_next_flag(&p->free_cluster_head,
+ idx, 0);
+ cluster_set_next_flag(&p->free_cluster_tail,
+ idx, 0);
+ } else {
+ unsigned int tail;
+
+ tail = cluster_next(&p->free_cluster_tail);
+ cluster_set_next(&cluster_info[tail], idx);
+ cluster_set_next_flag(&p->free_cluster_tail,
+ idx, 0);
+ }
+ }
+ idx++;
+ if (idx == nr_clusters)
+ idx = 0;
+ }
return nr_extents;
}
@@ -2059,6 +2358,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
+ struct swap_cluster_info *cluster_info = NULL;
unsigned long *frontswap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
@@ -2073,6 +2373,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (IS_ERR(p))
return PTR_ERR(p);
+ INIT_WORK(&p->discard_work, swap_discard_work);
+
name = getname(specialfile);
if (IS_ERR(name)) {
error = PTR_ERR(name);
@@ -2132,13 +2434,38 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -ENOMEM;
goto bad_swap;
}
+ if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+ p->flags |= SWP_SOLIDSTATE;
+ /*
+ * select a random position to start with to help SSD
+ * wear leveling
+ */
+ p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
+
+ cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
+ SWAPFILE_CLUSTER) * sizeof(*cluster_info));
+ if (!cluster_info) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ p->percpu_cluster = alloc_percpu(struct percpu_cluster);
+ if (!p->percpu_cluster) {
+ error = -ENOMEM;
+ goto bad_swap;
+ }
+ for_each_possible_cpu(i) {
+ struct percpu_cluster *cluster;
+ cluster = per_cpu_ptr(p->percpu_cluster, i);
+ cluster_set_null(&cluster->index);
+ }
+ }
error = swap_cgroup_swapon(p->type, maxpages);
if (error)
goto bad_swap;
nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
- maxpages, &span);
+ cluster_info, maxpages, &span);
if (unlikely(nr_extents < 0)) {
error = nr_extents;
goto bad_swap;
@@ -2147,41 +2474,33 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (frontswap_enabled)
frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
- if (p->bdev) {
- if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
- }
-
- if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
- /*
- * When discard is enabled for swap with no particular
- * policy flagged, we set all swap discard flags here in
- * order to sustain backward compatibility with older
- * swapon(8) releases.
- */
- p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
- SWP_PAGE_DISCARD);
+ if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
+ /*
+ * When discard is enabled for swap with no particular
+ * policy flagged, we set all swap discard flags here in
+ * order to sustain backward compatibility with older
+ * swapon(8) releases.
+ */
+ p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+ SWP_PAGE_DISCARD);
- /*
- * By flagging sys_swapon, a sysadmin can tell us to
- * either do single-time area discards only, or to just
- * perform discards for released swap page-clusters.
- * Now it's time to adjust the p->flags accordingly.
- */
- if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
- p->flags &= ~SWP_PAGE_DISCARD;
- else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
- p->flags &= ~SWP_AREA_DISCARD;
-
- /* issue a swapon-time discard if it's still required */
- if (p->flags & SWP_AREA_DISCARD) {
- int err = discard_swap(p);
- if (unlikely(err))
- printk(KERN_ERR
- "swapon: discard_swap(%p): %d\n",
- p, err);
- }
+ /*
+ * By flagging sys_swapon, a sysadmin can tell us to
+ * either do single-time area discards only, or to just
+ * perform discards for released swap page-clusters.
+ * Now it's time to adjust the p->flags accordingly.
+ */
+ if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
+ p->flags &= ~SWP_PAGE_DISCARD;
+ else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
+ p->flags &= ~SWP_AREA_DISCARD;
+
+ /* issue a swapon-time discard if it's still required */
+ if (p->flags & SWP_AREA_DISCARD) {
+ int err = discard_swap(p);
+ if (unlikely(err))
+ pr_err("swapon: discard_swap(%p): %d\n",
+ p, err);
}
}
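
The flag handling above defaults to both discard styles for backward compatibility and then strips one bit when the sysadmin requested a single policy. A compact sketch of the resulting policy table (bit values hypothetical):

#include <stdio.h>

#define SWP_AREA_DISCARD (1 << 0)	/* hypothetical bit values */
#define SWP_PAGE_DISCARD (1 << 1)

static unsigned int discard_policy(int once, int pages)
{
	unsigned int flags = SWP_AREA_DISCARD | SWP_PAGE_DISCARD;

	if (once)
		flags &= ~SWP_PAGE_DISCARD;	/* swapon-time discard only */
	else if (pages)
		flags &= ~SWP_AREA_DISCARD;	/* per-cluster discard only */
	return flags;
}

int main(void)
{
	printf("default: %#x\n", discard_policy(0, 0));	/* both */
	printf("once:    %#x\n", discard_policy(1, 0));	/* area only */
	printf("pages:   %#x\n", discard_policy(0, 1));	/* page only */
	return 0;
}
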
@@ -2190,9 +2509,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (swap_flags & SWAP_FLAG_PREFER)
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
- enable_swap_info(p, prio, swap_map, frontswap_map);
+ enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
- printk(KERN_INFO "Adding %uk swap on %s. "
+ pr_info("Adding %uk swap on %s. "
"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
@@ -2211,6 +2530,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
bad_swap:
+ free_percpu(p->percpu_cluster);
+ p->percpu_cluster = NULL;
if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
set_blocksize(p->bdev, p->old_block_size);
blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
@@ -2222,6 +2543,7 @@ bad_swap:
p->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
+ vfree(cluster_info);
if (swap_file) {
if (inode && S_ISREG(inode->i_mode)) {
mutex_unlock(&inode->i_mutex);
@@ -2291,6 +2613,16 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
goto unlock_out;
count = p->swap_map[offset];
+
+ /*
+ * swapin_readahead() doesn't check if a swap entry is valid, so the
+ * swap entry could be SWAP_MAP_BAD. Check here with lock held.
+ */
+ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
+
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
err = 0;
@@ -2326,7 +2658,7 @@ out:
return err;
bad_file:
- printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
+ pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
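
Throughout this file the new free/discard cluster lists are not separate allocations: swapon threads them through the cluster_info array itself, with each element carrying the index of the next cluster and head/tail anchors kept in the swap_info_struct. A self-contained sketch of such an index-threaded free list (simplified fields; the real code packs a flags word alongside the index):

#include <stdio.h>

#define NIL (~0u)

struct cluster {
	unsigned int next;	/* index of the next free cluster */
	unsigned int count;	/* pages allocated in this cluster */
};

struct pool {
	unsigned int free_head, free_tail;
	struct cluster info[8];
};

/* Append cluster idx to the free list threaded through info[]. */
static void free_list_add(struct pool *p, unsigned int idx)
{
	p->info[idx].next = NIL;	/* new tail terminates the list */
	if (p->free_head == NIL) {
		p->free_head = idx;
		p->free_tail = idx;
	} else {
		p->info[p->free_tail].next = idx;
		p->free_tail = idx;
	}
}

int main(void)
{
	struct pool p = { .free_head = NIL, .free_tail = NIL };

	for (unsigned int i = 0; i < 8; i++)
		if (!p.info[i].count)	/* empty clusters start out free */
			free_list_add(&p, i);

	for (unsigned int i = p.free_head; i != NIL; i = p.info[i].next)
		printf("free cluster %u\n", i);
	return 0;
}
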
diff --git a/mm/util.c b/mm/util.c
index 7441c41d00f..eaf63fc2c92 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -388,15 +388,12 @@ struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page))) {
swp_entry_t entry;
entry.val = page_private(page);
mapping = swap_address_space(entry);
- } else
-#endif
- if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+ } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
mapping = NULL;
return mapping;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13a54953a27..107454312d5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -752,7 +752,6 @@ struct vmap_block_queue {
struct vmap_block {
spinlock_t lock;
struct vmap_area *va;
- struct vmap_block_queue *vbq;
unsigned long free, dirty;
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
struct list_head free_list;
@@ -830,7 +829,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
radix_tree_preload_end();
vbq = &get_cpu_var(vmap_block_queue);
- vb->vbq = vbq;
spin_lock(&vbq->lock);
list_add_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -1018,15 +1016,16 @@ void vm_unmap_aliases(void)
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- int i;
+ int i, j;
spin_lock(&vb->lock);
i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
- while (i < VMAP_BBMAP_BITS) {
+ if (i < VMAP_BBMAP_BITS) {
unsigned long s, e;
- int j;
- j = find_next_zero_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
+
+ j = find_last_bit(vb->dirty_map,
+ VMAP_BBMAP_BITS);
+ j = j + 1; /* need exclusive index */
s = vb->va->va_start + (i << PAGE_SHIFT);
e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1036,10 +1035,6 @@ void vm_unmap_aliases(void)
start = s;
if (e > end)
end = e;
-
- i = j;
- i = find_next_bit(vb->dirty_map,
- VMAP_BBMAP_BITS, i);
}
spin_unlock(&vb->lock);
}
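
With this change vm_unmap_aliases() no longer walks every dirty fragment; it flushes one conservative span from the first dirty bit to one past the last. A sketch of the span computation on a single 64-bit map (toy scanners standing in for the kernel's find_first_bit/find_last_bit):

#include <stdio.h>

/* Return the lowest/highest set bit in a 64-bit map, or 64 if empty. */
static int first_bit(unsigned long long map)
{
	for (int i = 0; i < 64; i++)
		if (map & (1ULL << i))
			return i;
	return 64;
}

static int last_bit(unsigned long long map)
{
	for (int i = 63; i >= 0; i--)
		if (map & (1ULL << i))
			return i;
	return 64;
}

int main(void)
{
	unsigned long long dirty = 0x0f00f0ULL;	/* scattered dirty pages */
	int i = first_bit(dirty);

	if (i < 64) {
		int j = last_bit(dirty) + 1;	/* exclusive end, as in the patch */

		/* one flush covers every dirty fragment, plus the gaps between */
		printf("flush pages [%d, %d)\n", i, j);
	}
	return 0;
}
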
@@ -1263,7 +1258,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
unsigned long addr = (unsigned long)area->addr;
- unsigned long end = addr + area->size - PAGE_SIZE;
+ unsigned long end = addr + get_vm_area_size(area);
int err;
err = vmap_page_range(addr, end, prot, *pages);
@@ -1558,7 +1553,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
unsigned int nr_pages, array_size, i;
gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+ nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
@@ -1990,7 +1985,7 @@ long vread(char *buf, char *addr, unsigned long count)
vm = va->vm;
vaddr = (char *) vm->addr;
- if (addr >= vaddr + vm->size - PAGE_SIZE)
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -2000,7 +1995,7 @@ long vread(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + vm->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
if (!(vm->flags & VM_IOREMAP))
@@ -2072,7 +2067,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
vm = va->vm;
vaddr = (char *) vm->addr;
- if (addr >= vaddr + vm->size - PAGE_SIZE)
+ if (addr >= vaddr + get_vm_area_size(vm))
continue;
while (addr < vaddr) {
if (count == 0)
@@ -2081,7 +2076,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
addr++;
count--;
}
- n = vaddr + vm->size - PAGE_SIZE - addr;
+ n = vaddr + get_vm_area_size(vm) - addr;
if (n > count)
n = count;
if (!(vm->flags & VM_IOREMAP)) {
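
Every converted site above was open-coding area->size - PAGE_SIZE. The helper's likely shape, as a runnable sketch of the guard-page convention (struct fields and PAGE_SIZE assumed; the real helper lives in include/linux/vmalloc.h):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size */

struct vm_struct {
	void *addr;
	unsigned long size;	/* includes one trailing guard page */
};

/* Usable size of a vmalloc area: strip the guard page. */
static unsigned long get_vm_area_size(const struct vm_struct *area)
{
	return area->size - PAGE_SIZE;
}

int main(void)
{
	struct vm_struct vm = { .addr = 0, .size = 5 * PAGE_SIZE };

	printf("usable: %lu bytes\n", get_vm_area_size(&vm));	/* 4 pages */
	return 0;
}
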
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cff0d491c6..fe715daeb8b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -146,6 +146,25 @@ static bool global_reclaim(struct scan_control *sc)
}
#endif
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (get_nr_swap_pages() > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
+}
+
+bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
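
zone_reclaimable() replaces the cached zone->all_unreclaimable flag, used throughout the rest of this patch, with a direct heuristic: a zone counts as reclaimable until it has been scanned six times over its reclaimable page count. A worked sketch of the threshold:

#include <stdio.h>
#include <stdbool.h>

/* A zone is deemed reclaimable until scans exceed 6x its reclaimable pages. */
static bool zone_reclaimable(unsigned long pages_scanned,
			     unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	unsigned long reclaimable = 1000;

	printf("%d\n", zone_reclaimable(5999, reclaimable));	/* 1: keep trying */
	printf("%d\n", zone_reclaimable(6000, reclaimable));	/* 0: give up */
	return 0;
}
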
static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
if (!mem_cgroup_disabled())
@@ -545,7 +564,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
*/
void putback_lru_page(struct page *page)
{
- int lru;
+ bool is_unevictable;
int was_unevictable = PageUnevictable(page);
VM_BUG_ON(PageLRU(page));
@@ -560,14 +579,14 @@ redo:
* unevictable page on [in]active list.
* We know how to handle that.
*/
- lru = page_lru_base_type(page);
+ is_unevictable = false;
lru_cache_add(page);
} else {
/*
* Put unevictable pages directly on zone's unevictable
* list.
*/
- lru = LRU_UNEVICTABLE;
+ is_unevictable = true;
add_page_to_unevictable_list(page);
/*
* When racing with an mlock or AS_UNEVICTABLE clearing
@@ -587,7 +606,7 @@ redo:
* page is on unevictable list, it never be freed. To avoid that,
* check after we added it to the list, again.
*/
- if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
+ if (is_unevictable && page_evictable(page)) {
if (!isolate_lru_page(page)) {
put_page(page);
goto redo;
@@ -598,9 +617,9 @@ redo:
*/
}
- if (was_unevictable && lru != LRU_UNEVICTABLE)
+ if (was_unevictable && !is_unevictable)
count_vm_event(UNEVICTABLE_PGRESCUED);
- else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+ else if (!was_unevictable && is_unevictable)
count_vm_event(UNEVICTABLE_PGCULLED);
put_page(page); /* drop ref from isolate */
@@ -1789,7 +1808,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && zone->all_unreclaimable)
+ if (current_is_kswapd() && !zone_reclaimable(zone))
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
@@ -2244,8 +2263,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable &&
- sc->priority != DEF_PRIORITY)
+ if (sc->priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue; /* Let kswapd poll it */
if (IS_ENABLED(CONFIG_COMPACTION)) {
/*
@@ -2283,11 +2302,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
return aborted_reclaim;
}
-static bool zone_reclaimable(struct zone *zone)
-{
- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
-}
-
/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
struct scan_control *sc)
@@ -2301,7 +2315,7 @@ static bool all_unreclaimable(struct zonelist *zonelist,
continue;
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (!zone->all_unreclaimable)
+ if (zone_reclaimable(zone))
return false;
}
@@ -2712,7 +2726,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
* DEF_PRIORITY. Effectively, it considers them balanced so
* they must be considered balanced here as well!
*/
- if (zone->all_unreclaimable) {
+ if (!zone_reclaimable(zone)) {
balanced_pages += zone->managed_pages;
continue;
}
@@ -2773,7 +2787,6 @@ static bool kswapd_shrink_zone(struct zone *zone,
unsigned long lru_pages,
unsigned long *nr_attempted)
{
- unsigned long nr_slab;
int testorder = sc->order;
unsigned long balance_gap;
struct reclaim_state *reclaim_state = current->reclaim_state;
@@ -2818,15 +2831,12 @@ static bool kswapd_shrink_zone(struct zone *zone,
shrink_zone(zone, sc);
reclaim_state->reclaimed_slab = 0;
- nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
+ shrink_slab(&shrink, sc->nr_scanned, lru_pages);
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
/* Account for the number of pages attempted to reclaim */
*nr_attempted += sc->nr_to_reclaim;
- if (nr_slab == 0 && !zone_reclaimable(zone))
- zone->all_unreclaimable = 1;
-
zone_clear_flag(zone, ZONE_WRITEBACK);
/*
@@ -2835,7 +2845,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* BDIs but as pressure is relieved, speculatively avoid congestion
* waits.
*/
- if (!zone->all_unreclaimable &&
+ if (zone_reclaimable(zone) &&
zone_balanced(zone, testorder, 0, classzone_idx)) {
zone_clear_flag(zone, ZONE_CONGESTED);
zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
@@ -2901,8 +2911,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
/*
@@ -2980,8 +2990,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
sc.nr_scanned = 0;
@@ -3237,7 +3247,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
}
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+ if (zone_balanced(zone, order, 0, 0))
return;
trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
@@ -3265,20 +3275,6 @@ unsigned long global_reclaimable_pages(void)
return nr;
}
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- int nr;
-
- nr = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
-
- if (get_nr_swap_pages() > 0)
- nr += zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
-
- return nr;
-}
-
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3576,7 +3572,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
return ZONE_RECLAIM_FULL;
- if (zone->all_unreclaimable)
+ if (!zone_reclaimable(zone))
return ZONE_RECLAIM_FULL;
/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c2ef4458f..9bb31457791 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -414,12 +417,17 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
EXPORT_SYMBOL(dec_zone_page_state);
#endif
+static inline void fold_diff(int *diff)
+{
+ int i;
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (diff[i])
+ atomic_long_add(diff[i], &vm_stat[i]);
+}
+
/*
- * Update the zone counters for one cpu.
- *
- * The cpu specified must be either the current cpu or a processor that
- * is not online. If it is the current cpu then the execution thread must
- * be pinned to the current cpu.
+ * Update the zone counters for the current cpu.
*
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
@@ -432,33 +440,29 @@ EXPORT_SYMBOL(dec_zone_page_state);
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*/
-void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
{
struct zone *zone;
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
for_each_populated_zone(zone) {
- struct per_cpu_pageset *p;
+ struct per_cpu_pageset __percpu *p = zone->pageset;
- p = per_cpu_ptr(zone->pageset, cpu);
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+ int v;
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (p->vm_stat_diff[i]) {
- unsigned long flags;
- int v;
+ v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+ if (v) {
- local_irq_save(flags);
- v = p->vm_stat_diff[i];
- p->vm_stat_diff[i] = 0;
- local_irq_restore(flags);
atomic_long_add(v, &zone->vm_stat[i]);
global_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
- p->expire = 3;
+ __this_cpu_write(p->expire, 3);
#endif
}
+ }
cond_resched();
#ifdef CONFIG_NUMA
/*
@@ -468,29 +472,57 @@ void refresh_cpu_vm_stats(int cpu)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!p->expire || !p->pcp.count)
+ if (!__this_cpu_read(p->expire) ||
+ !__this_cpu_read(p->pcp.count))
continue;
/*
* We never drain zones local to this processor.
*/
if (zone_to_nid(zone) == numa_node_id()) {
- p->expire = 0;
+ __this_cpu_write(p->expire, 0);
continue;
}
- p->expire--;
- if (p->expire)
+
+ if (__this_cpu_dec_return(p->expire))
continue;
- if (p->pcp.count)
- drain_zone_pages(zone, &p->pcp);
+ if (__this_cpu_read(p->pcp.count))
+ drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
#endif
}
+ fold_diff(global_diff);
+}
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (global_diff[i])
- atomic_long_add(global_diff[i], &vm_stat[i]);
+/*
+ * Fold the data for an offline cpu into the global array.
+ * There cannot be any access by the offline cpu and therefore
+ * synchronization is simplified.
+ */
+void cpu_vm_stats_fold(int cpu)
+{
+ struct zone *zone;
+ int i;
+ int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
+
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *p;
+
+ p = per_cpu_ptr(zone->pageset, cpu);
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (p->vm_stat_diff[i]) {
+ int v;
+
+ v = p->vm_stat_diff[i];
+ p->vm_stat_diff[i] = 0;
+ atomic_long_add(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
+ }
+ }
+
+ fold_diff(global_diff);
}
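
refresh_cpu_vm_stats() now folds each per-cpu delta with an atomic exchange instead of an irq-disabled read-modify-write: this_cpu_xchg() grabs the counter and zeroes it in one step, so a concurrent increment cannot be lost between the read and the clear. A userspace sketch of the xchg-and-fold pattern, with C11 atomics standing in for the per-cpu primitives:

#include <stdatomic.h>
#include <stdio.h>

#define NR_ITEMS 3

static _Atomic int percpu_diff[NR_ITEMS];	/* one CPU's pending deltas */
static atomic_long global_stat[NR_ITEMS];	/* shared totals */

static void refresh_stats(void)
{
	for (int i = 0; i < NR_ITEMS; i++) {
		/* take the delta and zero it in one atomic step */
		int v = atomic_exchange(&percpu_diff[i], 0);

		if (v)
			atomic_fetch_add(&global_stat[i], v);
	}
}

int main(void)
{
	atomic_fetch_add(&percpu_diff[1], 5);	/* a counter increment */
	refresh_stats();
	refresh_stats();	/* second fold finds nothing: delta was consumed */
	printf("item 1 = %ld\n", atomic_load(&global_stat[1]));	/* 5 */
	return 0;
}
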
/*
@@ -703,6 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
const char * const vmstat_text[] = {
/* Zoned VM counters */
"nr_free_pages",
+ "nr_alloc_batch",
"nr_inactive_anon",
"nr_active_anon",
"nr_inactive_file",
@@ -817,6 +850,12 @@ const char * const vmstat_text[] = {
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
#endif
+#ifdef CONFIG_SMP
+ "nr_tlb_remote_flush",
+ "nr_tlb_remote_flush_received",
+#endif
+ "nr_tlb_local_flush_all",
+ "nr_tlb_local_flush_one",
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
@@ -1052,7 +1091,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n all_unreclaimable: %u"
"\n start_pfn: %lu"
"\n inactive_ratio: %u",
- zone->all_unreclaimable,
+ !zone_reclaimable(zone),
zone->zone_start_pfn,
zone->inactive_ratio);
seq_putc(m, '\n');
@@ -1177,7 +1216,7 @@ int sysctl_stat_interval __read_mostly = HZ;
static void vmstat_update(struct work_struct *w)
{
- refresh_cpu_vm_stats(smp_processor_id());
+ refresh_cpu_vm_stats();
schedule_delayed_work(&__get_cpu_var(vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
diff --git a/mm/zbud.c b/mm/zbud.c
index ad1e781284f..9451361e6aa 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -16,7 +16,7 @@
*
* zbud works by storing compressed pages, or "zpages", together in pairs in a
* single memory page called a "zbud page". The first buddy is "left
- * justifed" at the beginning of the zbud page, and the last buddy is "right
+ * justified" at the beginning of the zbud page, and the last buddy is "right
* justified" at the end of the zbud page. The benefit is that if either
* buddy is freed, the freed buddy space, coalesced with whatever slack space
* that existed between the buddies, results in the largest possible free region
@@ -243,7 +243,7 @@ void zbud_destroy_pool(struct zbud_pool *pool)
* gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
* as zbud pool pages.
*
- * Return: 0 if success and handle is set, otherwise -EINVAL is the size or
+ * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
* gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
* a new page.
*/
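
For the layout the corrected comment describes: at most two compressed pages share a page frame, one packed from offset 0 and one packed back from the end, with the slack in between coalescing into the free region when either buddy goes away. A sketch of where each buddy lands (page and chunk sizes assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size */
#define CHUNK 64UL		/* assumed zbud allocation granularity */

int main(void)
{
	unsigned long left = 700, right = 1500;	/* compressed sizes, bytes */
	unsigned long left_end = (left + CHUNK - 1) / CHUNK * CHUNK;
	unsigned long right_start = PAGE_SIZE - (right + CHUNK - 1) / CHUNK * CHUNK;

	/* left buddy starts at 0; right buddy ends at PAGE_SIZE */
	printf("left:  [0, %lu)\n", left_end);
	printf("right: [%lu, %lu)\n", right_start, PAGE_SIZE);
	printf("slack: %lu bytes reclaimed if either buddy frees\n",
	       right_start - left_end);
	return 0;
}
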
diff --git a/mm/zswap.c b/mm/zswap.c
index deda2b671e1..841e35f1db2 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -409,7 +409,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
struct page **retpage)
{
struct page *found_page, *new_page = NULL;
- struct address_space *swapper_space = &swapper_spaces[swp_type(entry)];
+ struct address_space *swapper_space = swap_address_space(entry);
int err;
*retpage = NULL;
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
static void zswap_frontswap_invalidate_area(unsigned type)
{
struct zswap_tree *tree = zswap_trees[type];
- struct rb_node *node;
- struct zswap_entry *entry;
+ struct zswap_entry *entry, *n;
if (!tree)
return;
/* walk the tree and free everything */
spin_lock(&tree->lock);
- /*
- * TODO: Even though this code should not be executed because
- * the try_to_unuse() in swapoff should have emptied the tree,
- * it is very wasteful to rebalance the tree after every
- * removal when we are freeing the whole tree.
- *
- * If post-order traversal code is ever added to the rbtree
- * implementation, it should be used here.
- */
- while ((node = rb_first(&tree->rbroot))) {
- entry = rb_entry(node, struct zswap_entry, rbnode);
- rb_erase(&entry->rbnode, &tree->rbroot);
+ rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
zbud_free(tree->pool, entry->handle);
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
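
rbtree_postorder_for_each_entry_safe() resolves the old TODO: post-order visits children before their parent, so every node can be freed in one pass with no rb_erase() rebalancing. The same teardown pattern on a plain binary tree, as a recursive sketch (the kernel macro is an iterative equivalent over struct rb_node):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

/* Post-order teardown: children first, then the node itself. */
static void free_tree(struct node *n)
{
	if (!n)
		return;
	free_tree(n->left);
	free_tree(n->right);
	printf("freeing %d\n", n->key);
	free(n);
}

static struct node *leaf(int key)
{
	struct node *n = calloc(1, sizeof(*n));

	n->key = key;
	return n;
}

int main(void)
{
	struct node *root = leaf(2);

	root->left = leaf(1);
	root->right = leaf(3);
	free_tree(root);	/* frees 1, 3, then 2 */
	return 0;
}
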
diff --git a/net/9p/client.c b/net/9p/client.c
index ba93bdab270..ee8fd6bd403 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -987,6 +987,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
{
int err;
struct p9_client *clnt;
+ char *client_id;
err = 0;
clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL);
@@ -995,6 +996,10 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
clnt->trans_mod = NULL;
clnt->trans = NULL;
+
+ client_id = utsname()->nodename;
+ memcpy(clnt->name, client_id, strlen(client_id) + 1);
+
spin_lock_init(&clnt->lock);
INIT_LIST_HEAD(&clnt->fidlist);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e1c26b10183..990afab2be1 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -577,6 +577,10 @@ static int p9_virtio_probe(struct virtio_device *vdev)
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
mutex_unlock(&virtio_9p_lock);
+
+ /* Let udev rules use the new mount_tag attribute. */
+ kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
+
return 0;
out_free_tag:
@@ -654,6 +658,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
list_del(&chan->chan_list);
mutex_unlock(&virtio_9p_lock);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
+ kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
kfree(chan->tag);
kfree(chan->vc_wq);
kfree(chan);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0ff42f029ac..1929af87b26 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -352,7 +352,7 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
if (queue_index != new_index && sk &&
rcu_access_pointer(sk->sk_dst_cache))
- sk_tx_queue_set(sk, queue_index);
+ sk_tx_queue_set(sk, new_index);
queue_index = new_index;
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 136fe55c1a4..7c96100b021 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -915,6 +915,9 @@ static int __init inet6_init(void)
err = ip6_route_init();
if (err)
goto ip6_route_fail;
+ err = ndisc_late_init();
+ if (err)
+ goto ndisc_late_fail;
err = ip6_flowlabel_init();
if (err)
goto ip6_flowlabel_fail;
@@ -981,6 +984,8 @@ ipv6_exthdrs_fail:
addrconf_fail:
ip6_flowlabel_cleanup();
ip6_flowlabel_fail:
+ ndisc_late_cleanup();
+ndisc_late_fail:
ip6_route_cleanup();
ip6_route_fail:
#ifdef CONFIG_PROC_FS
@@ -1043,6 +1048,7 @@ static void __exit inet6_exit(void)
ipv6_exthdrs_exit();
addrconf_cleanup();
ip6_flowlabel_cleanup();
+ ndisc_late_cleanup();
ip6_route_cleanup();
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 07a7d65a7cb..8d67900aa00 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -162,12 +162,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
off += optlen;
len -= optlen;
}
- /* This case will not be caught by above check since its padding
- * length is smaller than 7:
- * 1 byte NH + 1 byte Length + 6 bytes Padding
- */
- if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
- goto bad;
if (len == 0)
return true;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index a6c58ce43d3..e27591635f9 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -138,8 +138,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
return false;
suppress_route:
- ip6_rt_put(rt);
- return true;
+ ip6_rt_put(rt);
+ return true;
}
static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 73db48eba1c..5bec666aba6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -825,9 +825,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct rt6_info, rt6i_dst), allow_create,
replace_required);
-
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
+ fn = NULL;
goto out;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 12179457b2c..f8a55ff1971 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1727,24 +1727,28 @@ int __init ndisc_init(void)
if (err)
goto out_unregister_pernet;
#endif
- err = register_netdevice_notifier(&ndisc_netdev_notifier);
- if (err)
- goto out_unregister_sysctl;
out:
return err;
-out_unregister_sysctl:
#ifdef CONFIG_SYSCTL
- neigh_sysctl_unregister(&nd_tbl.parms);
out_unregister_pernet:
-#endif
unregister_pernet_subsys(&ndisc_net_ops);
goto out;
+#endif
}
-void ndisc_cleanup(void)
+int __init ndisc_late_init(void)
+{
+ return register_netdevice_notifier(&ndisc_netdev_notifier);
+}
+
+void ndisc_late_cleanup(void)
{
unregister_netdevice_notifier(&ndisc_netdev_notifier);
+}
+
+void ndisc_cleanup(void)
+{
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(&nd_tbl.parms);
#endif
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index fb36f856516..410db90db73 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1178,6 +1178,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
if (type > OVS_KEY_ATTR_MAX) {
OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
type, OVS_KEY_ATTR_MAX);
+ return -EINVAL;
}
if (attrs & (1 << type)) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2178b15ca6..863846cc551 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1495,7 +1495,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
- cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
+ cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
sch_tree_unlock(sch);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d5d5882a289..911b71b26b0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -806,6 +806,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
goto skip_mkasconf;
}
+ if (laddr == NULL)
+ return -EINVAL;
+
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
* setsockopt call.
@@ -6176,7 +6179,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* Is there any exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR |
- sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0;
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --git a/net/socket.c b/net/socket.c
index b2d7c629eeb..0ceaa5cb9ea 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3072,12 +3072,12 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
uifmap32 = &uifr32->ifr_ifru.ifru_map;
err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
- err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
- err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
- err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
- err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
- err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
- err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
+ err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+ err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+ err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+ err |= get_user(ifr.ifr_map.irq, &uifmap32->irq);
+ err |= get_user(ifr.ifr_map.dma, &uifmap32->dma);
+ err |= get_user(ifr.ifr_map.port, &uifmap32->port);
if (err)
return -EFAULT;
@@ -3088,12 +3088,12 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
if (cmd == SIOCGIFMAP && !err) {
err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
- err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
- err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
- err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
- err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
- err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
- err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
+ err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+ err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+ err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+ err |= put_user(ifr.ifr_map.irq, &uifmap32->irq);
+ err |= put_user(ifr.ifr_map.dma, &uifmap32->dma);
+ err |= put_user(ifr.ifr_map.port, &uifmap32->port);
if (err)
err = -EFAULT;
}
@@ -3167,25 +3167,25 @@ static int routing_ioctl(struct net *net, struct socket *sock,
struct in6_rtmsg32 __user *ur6 = argp;
ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
3 * sizeof(struct in6_addr));
- ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
- ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
- ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
- ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
- ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
- ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
- ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+ ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
+ ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+ ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+ ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
+ ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
+ ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
+ ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
r = (void *) &r6;
} else { /* ipv4 */
struct rtentry32 __user *ur4 = argp;
ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
3 * sizeof(struct sockaddr));
- ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
- ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
- ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
- ret |= __get_user(r4.rt_window, &(ur4->rt_window));
- ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
- ret |= __get_user(rtdev, &(ur4->rt_dev));
+ ret |= get_user(r4.rt_flags, &(ur4->rt_flags));
+ ret |= get_user(r4.rt_metric, &(ur4->rt_metric));
+ ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu));
+ ret |= get_user(r4.rt_window, &(ur4->rt_window));
+ ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt));
+ ret |= get_user(rtdev, &(ur4->rt_dev));
if (rtdev) {
ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
r4.rt_dev = (char __user __force *)devname;
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index af7ffd447fe..f1eb0d16666 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -213,6 +213,26 @@ static int gssp_call(struct net *net, struct rpc_message *msg)
return status;
}
+static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+ int i;
+
+ for (i = 0; i < arg->npages && arg->pages[i]; i++)
+ __free_page(arg->pages[i]);
+}
+
+static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+ arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
+ arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+ /*
+ * XXX: actual pages are allocated by xdr layer in
+ * xdr_partial_copy_from_skb.
+ */
+ if (!arg->pages)
+ return -ENOMEM;
+ return 0;
+}
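
The buffer is sized for the worst-case group list: NGROUPS_MAX 32-bit gids rounded up to whole pages, with the pages themselves filled in later by the xdr layer. The sizing arithmetic as a sketch (constants assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed */
#define NGROUPS_MAX 65536UL	/* assumed supplementary-group limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);

	printf("%lu pages for the gid array\n", npages);	/* 64 pages */
	return 0;
}
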
/*
* Public functions
@@ -261,10 +281,16 @@ int gssp_accept_sec_context_upcall(struct net *net,
arg.context_handle = &ctxh;
res.output_token->len = GSSX_max_output_token_sz;
+ ret = gssp_alloc_receive_pages(&arg);
+ if (ret)
+ return ret;
+
/* use nfs/ for targ_name ? */
ret = gssp_call(net, &msg);
+ gssp_free_receive_pages(&arg);
+
/* we need to fetch all data even in case of error so
* that we can free special structures if they have been allocated */
data->major_status = res.status.major_status;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 3c85d1c8a02..f0f78c5f1c7 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -166,14 +166,15 @@ static int dummy_dec_opt_array(struct xdr_stream *xdr,
return 0;
}
-static int get_s32(void **p, void *max, s32 *res)
+static int get_host_u32(struct xdr_stream *xdr, u32 *res)
{
- void *base = *p;
- void *next = (void *)((char *)base + sizeof(s32));
- if (unlikely(next > max || next < base))
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (!p)
return -EINVAL;
- memcpy(res, base, sizeof(s32));
- *p = next;
+ /* Contents of linux creds are all host-endian: */
+ memcpy(res, p, sizeof(u32));
return 0;
}
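
get_host_u32() deliberately memcpy()s instead of byte-swapping: as the comment notes, these cred fields arrive in the sender's host byte order, unlike ordinary big-endian XDR integers. A sketch of the difference between the two reads (buffer contents assumed):

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* four bytes as they sit in the receive buffer */
	unsigned char buf[4] = { 0x01, 0x00, 0x00, 0x00 };
	uint32_t host, xdr;

	memcpy(&host, buf, sizeof(host));	/* host-endian: 1 on little-endian */
	memcpy(&xdr, buf, sizeof(xdr));
	xdr = ntohl(xdr);			/* big-endian XDR: 0x01000000 */

	printf("host-endian read: %u, XDR read: %u\n", host, xdr);
	return 0;
}
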
@@ -182,9 +183,9 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
{
u32 length;
__be32 *p;
- void *q, *end;
- s32 tmp;
- int N, i, err;
+ u32 tmp;
+ u32 N;
+ int i, err;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
@@ -192,33 +193,28 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
length = be32_to_cpup(p);
- /* FIXME: we do not want to use the scratch buffer for this one
- * may need to use functions that allows us to access an io vector
- * directly */
- p = xdr_inline_decode(xdr, length);
- if (unlikely(p == NULL))
+ if (length > (3 + NGROUPS_MAX) * sizeof(u32))
return -ENOSPC;
- q = p;
- end = q + length;
-
/* uid */
- err = get_s32(&q, end, &tmp);
+ err = get_host_u32(xdr, &tmp);
if (err)
return err;
creds->cr_uid = make_kuid(&init_user_ns, tmp);
/* gid */
- err = get_s32(&q, end, &tmp);
+ err = get_host_u32(xdr, &tmp);
if (err)
return err;
creds->cr_gid = make_kgid(&init_user_ns, tmp);
/* number of additional gid's */
- err = get_s32(&q, end, &tmp);
+ err = get_host_u32(xdr, &tmp);
if (err)
return err;
N = tmp;
+ if ((3 + N) * sizeof(u32) != length)
+ return -EINVAL;
creds->cr_group_info = groups_alloc(N);
if (creds->cr_group_info == NULL)
return -ENOMEM;
@@ -226,7 +222,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
/* gid's */
for (i = 0; i < N; i++) {
kgid_t kgid;
- err = get_s32(&q, end, &tmp);
+ err = get_host_u32(xdr, &tmp);
if (err)
goto out_free_groups;
err = -EINVAL;
@@ -784,6 +780,9 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
/* arg->options */
err = dummy_enc_opt_array(xdr, &arg->options);
+ xdr_inline_pages(&req->rq_rcv_buf,
+ PAGE_SIZE/2 /* pretty arbitrary */,
+ arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
done:
if (err)
dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
index 1c98b27d870..685a688f3d8 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -147,6 +147,8 @@ struct gssx_arg_accept_sec_context {
struct gssx_cb *input_cb;
u32 ret_deleg_cred;
struct gssx_option_array options;
+ struct page **pages;
+ unsigned int npages;
};
struct gssx_res_accept_sec_context {
@@ -240,7 +242,8 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
2 * GSSX_max_princ_sz + \
8 + 8 + 4 + 4 + 4)
#define GSSX_max_output_token_sz 1024
-#define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4)
+/* grouplist not included; we allocate separate pages for that: */
+#define GSSX_max_creds_sz (4 + 4 + 4 /* + NGROUPS_MAX*4 */)
#define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \
GSSX_default_ctx_sz + \
GSSX_max_output_token_sz + \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2ee9eb75056..47016c304c8 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -31,12 +31,16 @@ my $show_types = 0;
my $fix = 0;
my $root;
my %debug;
-my %ignore_type = ();
my %camelcase = ();
+my %use_type = ();
+my @use = ();
+my %ignore_type = ();
my @ignore = ();
my $help = 0;
my $configuration_file = ".checkpatch.conf";
my $max_line_length = 80;
+my $ignore_perl_version = 0;
+my $minimum_perl_version = 5.10.0;
sub help {
my ($exitcode) = @_;
@@ -54,6 +58,7 @@ Options:
--terse one line per report
-f, --file treat FILE as regular source file
--subjective, --strict enable more subjective tests
+ --types TYPE(,TYPE2...) show only these comma separated message types
--ignore TYPE(,TYPE2...) ignore various comma separated message types
--max-line-length=n set the maximum line length, if exceeded, warn
--show-types show the message "types" in the output
@@ -71,6 +76,8 @@ Options:
"<inputfile>.EXPERIMENTAL-checkpatch-fixes"
with potential errors corrected to the preferred
checkpatch style
+ --ignore-perl-version override checking of perl version; expect
+ runtime errors.
-h, --help, --version display this help and exit
When FILE is - read standard input.
@@ -116,6 +123,7 @@ GetOptions(
'subjective!' => \$check,
'strict!' => \$check,
'ignore=s' => \@ignore,
+ 'types=s' => \@use,
'show-types!' => \$show_types,
'max-line-length=i' => \$max_line_length,
'root=s' => \$root,
@@ -123,6 +131,7 @@ GetOptions(
'mailback!' => \$mailback,
'summary-file!' => \$summary_file,
'fix!' => \$fix,
+ 'ignore-perl-version!' => \$ignore_perl_version,
'debug=s' => \%debug,
'test-only=s' => \$tst_only,
'h|help' => \$help,
@@ -133,24 +142,50 @@ help(0) if ($help);
my $exit = 0;
+if ($^V && $^V lt $minimum_perl_version) {
+ printf "$P: requires at least perl version %vd\n", $minimum_perl_version;
+ if (!$ignore_perl_version) {
+ exit(1);
+ }
+}
+
if ($#ARGV < 0) {
print "$P: no input files\n";
exit(1);
}
-@ignore = split(/,/, join(',',@ignore));
-foreach my $word (@ignore) {
- $word =~ s/\s*\n?$//g;
- $word =~ s/^\s*//g;
- $word =~ s/\s+/ /g;
- $word =~ tr/[a-z]/[A-Z]/;
+sub hash_save_array_words {
+ my ($hashRef, $arrayRef) = @_;
+
+ my @array = split(/,/, join(',', @$arrayRef));
+ foreach my $word (@array) {
+ $word =~ s/\s*\n?$//g;
+ $word =~ s/^\s*//g;
+ $word =~ s/\s+/ /g;
+ $word =~ tr/[a-z]/[A-Z]/;
+
+ next if ($word =~ m/^\s*#/);
+ next if ($word =~ m/^\s*$/);
- next if ($word =~ m/^\s*#/);
- next if ($word =~ m/^\s*$/);
+ $hashRef->{$word}++;
+ }
+}
- $ignore_type{$word}++;
+sub hash_show_words {
+ my ($hashRef, $prefix) = @_;
+
+ if ($quiet == 0 && keys %$hashRef) {
+ print "NOTE: $prefix message types:";
+ foreach my $word (sort keys %$hashRef) {
+ print " $word";
+ }
+ print "\n\n";
+ }
}
+hash_save_array_words(\%ignore_type, \@ignore);
+hash_save_array_words(\%use_type, \@use);
+
my $dbg_values = 0;
my $dbg_possible = 0;
my $dbg_type = 0;
@@ -207,6 +242,8 @@ our $Sparse = qr{
__rcu
}x;
+our $InitAttribute = qr{__(?:mem|cpu|dev|net_|)(?:initdata|initconst|init\b)};
+
# Notes to $Attribute:
# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
our $Attribute = qr{
@@ -227,7 +264,7 @@ our $Attribute = qr{
__deprecated|
__read_mostly|
__kprobes|
- __(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
+ $InitAttribute|
____cacheline_aligned|
____cacheline_aligned_in_smp|
____cacheline_internodealigned_in_smp|
@@ -257,6 +294,7 @@ our $Operators = qr{
}x;
our $NonptrType;
+our $NonptrTypeWithAttr;
our $Type;
our $Declare;
@@ -319,6 +357,12 @@ our @typeList = (
qr{${Ident}_handler},
qr{${Ident}_handler_fn},
);
+our @typeListWithAttr = (
+ @typeList,
+ qr{struct\s+$InitAttribute\s+$Ident},
+ qr{union\s+$InitAttribute\s+$Ident},
+);
+
our @modifierList = (
qr{fastcall},
);
@@ -332,6 +376,7 @@ our $allowed_asm_includes = qr{(?x:
sub build_types {
my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
+ my $allWithAttr = "(?x: \n" . join("|\n ", @typeListWithAttr) . "\n)";
$Modifier = qr{(?:$Attribute|$Sparse|$mods)};
$NonptrType = qr{
(?:$Modifier\s+|const\s+)*
@@ -342,6 +387,15 @@ sub build_types {
)
(?:\s+$Modifier|\s+const)*
}x;
+ $NonptrTypeWithAttr = qr{
+ (?:$Modifier\s+|const\s+)*
+ (?:
+ (?:typeof|__typeof__)\s*\([^\)]*\)|
+ (?:$typeTypedefs\b)|
+ (?:${allWithAttr}\b)
+ )
+ (?:\s+$Modifier|\s+const)*
+ }x;
$Type = qr{
$NonptrType
(?:(?:\s|\*|\[\])+\s*const|(?:\s|\*|\[\])+|(?:\s*\[\s*\])+)?
@@ -1355,7 +1409,9 @@ sub possible {
my $prefix = '';
sub show_type {
- return !defined $ignore_type{$_[0]};
+ return defined $use_type{$_[0]} if (scalar keys %use_type > 0);
+
+ return !defined $ignore_type{$_[0]};
}
sub report {
@@ -1435,7 +1491,23 @@ sub check_absolute_file {
sub trim {
my ($string) = @_;
- $string =~ s/(^\s+|\s+$)//g;
+ $string =~ s/^\s+|\s+$//g;
+
+ return $string;
+}
+
+sub ltrim {
+ my ($string) = @_;
+
+ $string =~ s/^\s+//;
+
+ return $string;
+}
+
+sub rtrim {
+ my ($string) = @_;
+
+ $string =~ s/\s+$//;
return $string;
}
@@ -1532,6 +1604,7 @@ sub process {
my %suppress_export;
my $suppress_statement = 0;
+ my %signatures = ();
# Pre-scan the patch sanitizing the lines.
# Pre-scan the patch looking for any __setup documentation.
@@ -1624,6 +1697,8 @@ sub process {
$linenr = 0;
foreach my $line (@lines) {
$linenr++;
+ my $sline = $line; #copy of $line
+ $sline =~ s/$;/ /g; #with comments as spaces
my $rawline = $rawlines[$linenr - 1];
@@ -1781,6 +1856,17 @@ sub process {
"email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
}
}
+
+# Check for duplicate signatures
+ my $sig_nospace = $line;
+ $sig_nospace =~ s/\s//g;
+ $sig_nospace = lc($sig_nospace);
+ if (defined $signatures{$sig_nospace}) {
+ WARN("BAD_SIGN_OFF",
+ "Duplicate signature\n" . $herecurr);
+ } else {
+ $signatures{$sig_nospace} = 1;
+ }
}
# Check for wrappage within a valid hunk of the file
@@ -1845,15 +1931,17 @@ sub process {
#trailing whitespace
if ($line =~ /^\+.*\015/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- ERROR("DOS_LINE_ENDINGS",
- "DOS line endings\n" . $herevet);
-
+ if (ERROR("DOS_LINE_ENDINGS",
+ "DOS line endings\n" . $herevet) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/[\s\015]+$//;
+ }
} elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
if (ERROR("TRAILING_WHITESPACE",
"trailing whitespace\n" . $herevet) &&
$fix) {
- $fixed[$linenr - 1] =~ s/^(\+.*?)\s+$/$1/;
+ $fixed[$linenr - 1] =~ s/\s+$//;
}
$rpt_cleaners = 1;
@@ -2060,6 +2148,7 @@ sub process {
if ($realfile =~ m@^(drivers/net/|net/)@ &&
$prevrawline =~ /^\+[ \t]*\/\*/ && #starting /*
$prevrawline !~ /\*\/[ \t]*$/ && #no trailing */
+ $rawline =~ /^\+/ && #line is new
$rawline !~ /^\+[ \t]*\*/) { #no leading *
WARN("NETWORKING_BLOCK_COMMENT_STYLE",
"networking block comments start with * on subsequent lines\n" . $hereprev);
@@ -2126,7 +2215,7 @@ sub process {
$realline_next);
#print "LINE<$line>\n";
if ($linenr >= $suppress_statement &&
- $realcnt && $line =~ /.\s*\S/) {
+ $realcnt && $sline =~ /.\s*\S/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0);
$stat =~ s/\n./\n /g;
@@ -2486,16 +2575,22 @@ sub process {
}
# check for global initialisers.
- if ($line =~ /^.$Type\s*$Ident\s*(?:\s+$Modifier)*\s*=\s*(0|NULL|false)\s*;/) {
- ERROR("GLOBAL_INITIALISERS",
- "do not initialise globals to 0 or NULL\n" .
- $herecurr);
+ if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+ if (ERROR("GLOBAL_INITIALISERS",
+ "do not initialise globals to 0 or NULL\n" .
+ $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+ }
}
# check for static initialisers.
- if ($line =~ /\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
- ERROR("INITIALISED_STATIC",
- "do not initialise statics to 0 or NULL\n" .
- $herecurr);
+ if ($line =~ /^\+.*\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
+ if (ERROR("INITIALISED_STATIC",
+ "do not initialise statics to 0 or NULL\n" .
+ $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/(\bstatic\s.*?)\s*=\s*(0|NULL|false)\s*;/$1;/;
+ }
}
# check for static const char * arrays.
@@ -2638,8 +2733,12 @@ sub process {
}
if ($line =~ /\bpr_warning\s*\(/) {
- WARN("PREFER_PR_LEVEL",
- "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+ if (WARN("PREFER_PR_LEVEL",
+ "Prefer pr_warn(... to pr_warning(...\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~
+ s/\bpr_warning\b/pr_warn/;
+ }
}
if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
@@ -2759,6 +2858,7 @@ sub process {
$off = 0;
my $blank = copy_spacing($opline);
+ my $last_after = -1;
for (my $n = 0; $n < $#elements; $n += 2) {
@@ -2824,7 +2924,7 @@ sub process {
$cc !~ /^\\/ && $cc !~ /^;/) {
if (ERROR("SPACING",
"space required after that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+ $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
$line_fixed = 1;
}
}
@@ -2839,11 +2939,11 @@ sub process {
if ($ctx =~ /Wx.|.xW/) {
if (ERROR("SPACING",
"spaces prohibited around that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
+ $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
+ $line_fixed = 1;
}
}
@@ -2852,8 +2952,9 @@ sub process {
if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
if (ERROR("SPACING",
"space required after that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " ";
+ $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
$line_fixed = 1;
+ $last_after = $n;
}
}
@@ -2870,8 +2971,10 @@ sub process {
if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
if (ERROR("SPACING",
"space required before that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
+ if ($n != $last_after + 2) {
+ $good = $fix_elements[$n] . " " . ltrim($fix_elements[$n + 1]);
+ $line_fixed = 1;
+ }
}
}
if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
@@ -2880,12 +2983,11 @@ sub process {
} elsif ($ctx =~ /.xW/) {
if (ERROR("SPACING",
"space prohibited after that '$op' $at\n" . $hereptr)) {
- $fixed_line =~ s/\s+$//;
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
+ $good = $fix_elements[$n] . rtrim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
+ $line_fixed = 1;
}
}
@@ -2894,8 +2996,7 @@ sub process {
if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
if (ERROR("SPACING",
"space required one side of that '$op' $at\n" . $hereptr)) {
- $fixed_line =~ s/\s+$//;
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]) . " ";
+ $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
$line_fixed = 1;
}
}
@@ -2903,20 +3004,18 @@ sub process {
($ctx =~ /Wx./ && $cc =~ /^;/)) {
if (ERROR("SPACING",
"space prohibited before that '$op' $at\n" . $hereptr)) {
- $fixed_line =~ s/\s+$//;
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+ $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
if ($ctx =~ /ExW/) {
if (ERROR("SPACING",
"space prohibited after that '$op' $at\n" . $hereptr)) {
- $fixed_line =~ s/\s+$//;
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
+ $good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
if (defined $fix_elements[$n + 2]) {
$fix_elements[$n + 2] =~ s/^\s+//;
}
+ $line_fixed = 1;
}
}
@@ -2930,8 +3029,10 @@ sub process {
if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
if (ERROR("SPACING",
"need consistent spacing around '$op' $at\n" . $hereptr)) {
- $fixed_line =~ s/\s+$//;
- $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+ $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+ if (defined $fix_elements[$n + 2]) {
+ $fix_elements[$n + 2] =~ s/^\s+//;
+ }
$line_fixed = 1;
}
}
@@ -2942,7 +3043,7 @@ sub process {
if ($ctx =~ /Wx./) {
if (ERROR("SPACING",
"space prohibited before that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
+ $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
$line_fixed = 1;
}
}
@@ -2969,8 +3070,10 @@ sub process {
if ($ok == 0) {
if (ERROR("SPACING",
"spaces required around that '$op' $at\n" . $hereptr)) {
- $good = trim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
- $good = $fix_elements[$n] . " " . trim($fix_elements[$n + 1]) . " ";
+ $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
+ if (defined $fix_elements[$n + 2]) {
+ $fix_elements[$n + 2] =~ s/^\s+//;
+ }
$line_fixed = 1;
}
}
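
The operator-spacing hunks above all make the same change: stop trim()ing both sides of the left-hand element and use rtrim() (or leave $fix_elements[$n] untouched) instead. Trimming the left element also removed the line's indentation whenever the operator sat near the start of the line. A minimal Python sketch of the difference — the variable names are illustrative, not checkpatch's:

    # 'left' plays the role of $fix_elements[$n], the text before the operator
    def trim(s):  return s.strip()
    def rtrim(s): return s.rstrip()

    left, rest = "\tif (a ", "== b)"
    old = trim(left) + " " + trim(rest)     # "if (a == b)"   -- tab is gone
    new = rtrim(left) + " " + trim(rest)    # "\tif (a == b)" -- indent kept
    print(repr(old), repr(new))
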
@@ -3031,8 +3134,7 @@ sub process {
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
- $fixed[$linenr - 1] =~
- s/^(\+.*(?:do|\))){/$1 {/;
+ $fixed[$linenr - 1] =~ s/^(\+.*(?:do|\))){/$1 {/;
}
}
@@ -3047,8 +3149,12 @@ sub process {
# closing brace should have a space following it when it has anything
# on the line
if ($line =~ /}(?!(?:,|;|\)))\S/) {
- ERROR("SPACING",
- "space required after that close brace '}'\n" . $herecurr);
+ if (ERROR("SPACING",
+ "space required after that close brace '}'\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~
+ s/}((?!(?:,|;|\)))\S)/} $1/;
+ }
}
# check spacing on square brackets
@@ -3271,8 +3377,13 @@ sub process {
#gcc binary extension
if ($var =~ /^$Binary$/) {
- WARN("GCC_BINARY_CONSTANT",
- "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr);
+ if (WARN("GCC_BINARY_CONSTANT",
+ "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
+ $fix) {
+ my $hexval = sprintf("0x%x", oct($var));
+ $fixed[$linenr - 1] =~
+ s/\b$var\b/$hexval/;
+ }
}
#CamelCase
@@ -3282,19 +3393,26 @@ sub process {
$var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
#Ignore SI style variants like nS, mV and dB (ie: max_uV, regulator_min_uA_show)
$var !~ /^(?:[a-z_]*?)_?[a-z][A-Z](?:_[a-z_]+)?$/) {
- seed_camelcase_includes() if ($check);
- if (!defined $camelcase{$var}) {
- $camelcase{$var} = 1;
- CHK("CAMELCASE",
- "Avoid CamelCase: <$var>\n" . $herecurr);
+ while ($var =~ m{($Ident)}g) {
+ my $word = $1;
+ next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
+ seed_camelcase_includes() if ($check);
+ if (!defined $camelcase{$word}) {
+ $camelcase{$word} = 1;
+ CHK("CAMELCASE",
+ "Avoid CamelCase: <$word>\n" . $herecurr);
+ }
}
}
}
#no spaces allowed after \ in define
- if ($line=~/\#\s*define.*\\\s$/) {
- WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
- "Whitepspace after \\ makes next lines useless\n" . $herecurr);
+ if ($line =~ /\#\s*define.*\\\s+$/) {
+ if (WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
+ "Whitespace after \\ makes next lines useless\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\s+$//;
+ }
}
#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
@@ -3374,7 +3492,8 @@ sub process {
$dstat !~ /^for\s*$Constant$/ && # for (...)
$dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar()
$dstat !~ /^do\s*{/ && # do {...
- $dstat !~ /^\({/) # ({...
+ $dstat !~ /^\({/ && # ({...
+ $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
{
$ctx =~ s/\n*$//;
my $herectx = $here . "\n";
@@ -3606,6 +3725,32 @@ sub process {
}
}
+sub string_find_replace {
+ my ($string, $find, $replace) = @_;
+
+ $string =~ s/$find/$replace/g;
+
+ return $string;
+}
+
+# check for bad placement of section $InitAttribute (e.g.: __initdata)
+ if ($line =~ /(\b$InitAttribute\b)/) {
+ my $attr = $1;
+ if ($line =~ /^\+\s*static\s+(?:const\s+)?(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*[=;]/) {
+ my $ptr = $1;
+ my $var = $2;
+ if ((($ptr =~ /\b(union|struct)\s+$attr\b/ &&
+ ERROR("MISPLACED_INIT",
+ "$attr should be placed after $var\n" . $herecurr)) ||
+ ($ptr !~ /\b(union|struct)\s+$attr\b/ &&
+ WARN("MISPLACED_INIT",
+ "$attr should be placed after $var\n" . $herecurr))) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/(\bstatic\s+(?:const\s+)?)(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*([=;])\s*/"$1" . trim(string_find_replace($2, "\\s*$attr\\s*", " ")) . " " . trim(string_find_replace($3, "\\s*$attr\\s*", "")) . " $attr" . ("$4" eq ";" ? ";" : " = ")/e;
+ }
+ }
+ }
+
# prefer usleep_range over udelay
if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
# ignore udelay's < 10, however
@@ -3691,8 +3836,12 @@ sub process {
# Check for __inline__ and __inline, prefer inline
if ($line =~ /\b(__inline__|__inline)\b/) {
- WARN("INLINE",
- "plain inline is preferred over $1\n" . $herecurr);
+ if (WARN("INLINE",
+ "plain inline is preferred over $1\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\b(__inline__|__inline)\b/inline/;
+
+ }
}
# Check for __attribute__ packed, prefer __packed
@@ -3709,14 +3858,21 @@ sub process {
# Check for __attribute__ format(printf, prefer __printf
if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
- WARN("PREFER_PRINTF",
- "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr);
+ if (WARN("PREFER_PRINTF",
+ "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf\s*,\s*(.*)\)\s*\)\s*\)/"__printf(" . trim($1) . ")"/ex;
+
+ }
}
# Check for __attribute__ format(scanf, prefer __scanf
if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
- WARN("PREFER_SCANF",
- "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr);
+ if (WARN("PREFER_SCANF",
+ "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\s*,\s*(.*)\)\s*\)\s*\)/"__scanf(" . trim($1) . ")"/ex;
+ }
}
# check for sizeof(&)
@@ -3727,8 +3883,11 @@ sub process {
# check for sizeof without parenthesis
if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
- WARN("SIZEOF_PARENTHESIS",
- "sizeof $1 should be sizeof($1)\n" . $herecurr);
+ if (WARN("SIZEOF_PARENTHESIS",
+ "sizeof $1 should be sizeof($1)\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/"sizeof(" . trim($1) . ")"/ex;
+ }
}
# check for line continuations in quoted strings with odd counts of "
@@ -3747,8 +3906,11 @@ sub process {
if ($line =~ /\bseq_printf\s*\(/) {
my $fmt = get_quoted_string($line, $rawline);
if ($fmt !~ /[^\\]\%/) {
- WARN("PREFER_SEQ_PUTS",
- "Prefer seq_puts to seq_printf\n" . $herecurr);
+ if (WARN("PREFER_SEQ_PUTS",
+ "Prefer seq_puts to seq_printf\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\bseq_printf\b/seq_puts/;
+ }
}
}
@@ -3810,6 +3972,16 @@ sub process {
}
}
+# check for new externs in .h files.
+ if ($realfile =~ /\.h$/ &&
+ $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
+ if (WARN("AVOID_EXTERNS",
+ "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
+ }
+ }
+
# check for new externs in .c files.
if ($realfile =~ /\.c$/ && defined $stat &&
$stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
@@ -3879,8 +4051,11 @@ sub process {
# check for multiple semicolons
if ($line =~ /;\s*;\s*$/) {
- WARN("ONE_SEMICOLON",
- "Statements terminations use 1 semicolon\n" . $herecurr);
+ if (WARN("ONE_SEMICOLON",
+ "Statements terminations use 1 semicolon\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/(\s*;\s*){2,}$/;/g;
+ }
}
# check for switch/default statements without a break;
@@ -3898,9 +4073,12 @@ sub process {
}
# check for gcc specific __FUNCTION__
- if ($line =~ /__FUNCTION__/) {
- WARN("USE_FUNC",
- "__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr);
+ if ($line =~ /\b__FUNCTION__\b/) {
+ if (WARN("USE_FUNC",
+ "__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/\b__FUNCTION__\b/__func__/g;
+ }
}
# check for use of yield()
@@ -4105,13 +4283,8 @@ sub process {
}
}
- if ($quiet == 0 && keys %ignore_type) {
- print "NOTE: Ignored message types:";
- foreach my $ignore (sort keys %ignore_type) {
- print " $ignore";
- }
- print "\n\n";
- }
+ hash_show_words(\%use_type, "Used");
+ hash_show_words(\%ignore_type, "Ignored");
if ($clean == 0 && $fix && "@rawlines" ne "@fixed") {
my $newfile = $filename . ".EXPERIMENTAL-checkpatch-fixes";
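
Taken together, the checkpatch.pl hunks above convert one-shot diagnostics into the --fix pattern: the message call (ERROR/WARN/CHK) reports and returns true, and when --fix is set the offending line in the @fixed copy is rewritten with a substitution; if the fixed copy ends up differing from @rawlines, it is written out to the .EXPERIMENTAL-checkpatch-fixes file named just above. A hedged Python sketch of that control flow — warn() and the sample lines are stand-ins, not checkpatch internals:

    import re

    fix = True                                  # checkpatch's --fix flag
    fixed = ["+x = foo;;", "+y = bar;"]         # hypothetical patch lines

    def warn(kind, msg):                        # stand-in for WARN(): report,
        print("WARNING:%s: %s" % (kind, msg))   # then return true so the
        return True                             # caller may apply a fix

    for n, line in enumerate(fixed):
        if re.search(r";\s*;\s*$", line) and warn("ONE_SEMICOLON", line) and fix:
            fixed[n] = re.sub(r"(\s*;\s*){2,}$", ";", line)

    print(fixed)    # ['+x = foo;', '+y = bar;'] -- the corrected copy
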
diff --git a/scripts/config b/scripts/config
index 567120a87c3..2283be2bb62 100755
--- a/scripts/config
+++ b/scripts/config
@@ -62,15 +62,52 @@ checkarg() {
fi
}
+txt_append() {
+ local anchor="$1"
+ local insert="$2"
+ local infile="$3"
+ local tmpfile="$infile.swp"
+
+ # sed append cmd: 'a\' + newline + text + newline
+ cmd="$(printf "a\\%b$insert" "\n")"
+
+ sed -e "/$anchor/$cmd" "$infile" >"$tmpfile"
+ # replace original file with the edited one
+ mv "$tmpfile" "$infile"
+}
+
+txt_subst() {
+ local before="$1"
+ local after="$2"
+ local infile="$3"
+ local tmpfile="$infile.swp"
+
+ sed -e "s/$before/$after/" "$infile" >"$tmpfile"
+ # replace original file with the edited one
+ mv "$tmpfile" "$infile"
+}
+
+txt_delete() {
+ local text="$1"
+ local infile="$2"
+ local tmpfile="$infile.swp"
+
+ sed -e "/$text/d" "$infile" >"$tmpfile"
+ # replace original file with the edited one
+ mv "$tmpfile" "$infile"
+}
+
set_var() {
local name=$1 new=$2 before=$3
name_re="^($name=|# $name is not set)"
before_re="^($before=|# $before is not set)"
if test -n "$before" && grep -Eq "$before_re" "$FN"; then
- sed -ri "/$before_re/a $new" "$FN"
+ txt_append "^$before=" "$new" "$FN"
+ txt_append "^# $before is not set" "$new" "$FN"
elif grep -Eq "$name_re" "$FN"; then
- sed -ri "s:$name_re.*:$new:" "$FN"
+ txt_subst "^$name=.*" "$new" "$FN"
+ txt_subst "^# $name is not set" "$new" "$FN"
else
echo "$new" >>"$FN"
fi
@@ -79,7 +116,8 @@ set_var() {
undef_var() {
local name=$1
- sed -ri "/^($name=|# $name is not set)/d" "$FN"
+ txt_delete "^$name=" "$FN"
+ txt_delete "^# $name is not set" "$FN"
}
if [ "$1" = "--file" ]; then
diff --git a/scripts/diffconfig b/scripts/diffconfig
index b91f3e34d44..6d672836e18 100755
--- a/scripts/diffconfig
+++ b/scripts/diffconfig
@@ -10,7 +10,7 @@
import sys, os
def usage():
- print """Usage: diffconfig [-h] [-m] [<config1> <config2>]
+ print("""Usage: diffconfig [-h] [-m] [<config1> <config2>]
Diffconfig is a simple utility for comparing two .config files.
Using standard diff to compare .config files often includes extraneous and
@@ -33,7 +33,7 @@ Example usage:
EXT2_FS y -> n
LOG_BUF_SHIFT 14 -> 16
PRINTK_TIME n -> y
-"""
+""")
sys.exit(0)
# returns a dictionary of name/value pairs for config items in the file
@@ -54,23 +54,23 @@ def print_config(op, config, value, new_value):
if merge_style:
if new_value:
if new_value=="n":
- print "# CONFIG_%s is not set" % config
+ print("# CONFIG_%s is not set" % config)
else:
- print "CONFIG_%s=%s" % (config, new_value)
+ print("CONFIG_%s=%s" % (config, new_value))
else:
if op=="-":
- print "-%s %s" % (config, value)
+ print("-%s %s" % (config, value))
elif op=="+":
- print "+%s %s" % (config, new_value)
+ print("+%s %s" % (config, new_value))
else:
- print " %s %s -> %s" % (config, value, new_value)
+ print(" %s %s -> %s" % (config, value, new_value))
def main():
global merge_style
# parse command line args
if ("-h" in sys.argv or "--help" in sys.argv):
- usage()
+ usage()
merge_style = 0
if "-m" in sys.argv:
@@ -79,23 +79,27 @@ def main():
argc = len(sys.argv)
if not (argc==1 or argc == 3):
- print "Error: incorrect number of arguments or unrecognized option"
+ print("Error: incorrect number of arguments or unrecognized option")
usage()
if argc == 1:
# if no filenames given, assume .config and .config.old
build_dir=""
- if os.environ.has_key("KBUILD_OUTPUT"):
+ if "KBUILD_OUTPUT" in os.environ:
build_dir = os.environ["KBUILD_OUTPUT"]+"/"
-
configa_filename = build_dir + ".config.old"
configb_filename = build_dir + ".config"
else:
configa_filename = sys.argv[1]
configb_filename = sys.argv[2]
- a = readconfig(file(configa_filename))
- b = readconfig(file(configb_filename))
+ try:
+ a = readconfig(open(configa_filename))
+ b = readconfig(open(configb_filename))
+ except (IOError):
+ e = sys.exc_info()[1]
+ print("I/O error[%s]: %s\n" % (e.args[0],e.args[1]))
+ usage()
# print items in a but not b (accumulate, sort and print)
old = []
@@ -121,8 +125,7 @@ def main():
# now print items in b but not in a
# (items from b that were in a were removed above)
- new = b.keys()
- new.sort()
+ new = sorted(b.keys())
for config in new:
print_config("+", config, None, b[config])
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c55c227af46..87f72380407 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -140,7 +140,9 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
sym->flags |= def_flags;
break;
}
- conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+ if (def != S_DEF_AUTO)
+ conf_warning("symbol value '%s' invalid for %s",
+ p, sym->name);
return 1;
case S_OTHER:
if (*p != '"') {
@@ -161,7 +163,8 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
memmove(p2, p2 + 1, strlen(p2));
}
if (!p2) {
- conf_warning("invalid string found");
+ if (def != S_DEF_AUTO)
+ conf_warning("invalid string found");
return 1;
}
/* fall through */
@@ -172,7 +175,9 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
sym->def[def].val = strdup(p);
sym->flags |= def_flags;
} else {
- conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+ if (def != S_DEF_AUTO)
+ conf_warning("symbol value '%s' invalid for %s",
+ p, sym->name);
return 1;
}
break;
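
The conf_set_sym_val() changes gate conf_warning() on the definition source: when values come back in via the auto-generated config (S_DEF_AUTO), stale or invalid entries are expected and should not be reported to the user. An illustrative sketch — the constants and function are hypothetical stand-ins for the C code:

    S_DEF_USER, S_DEF_AUTO = 0, 4       # stand-ins for the C enum values

    def set_sym_val(name, value, valid, definition):
        if value not in valid:
            if definition != S_DEF_AUTO:    # stay quiet for auto.conf input
                print("warning: symbol value '%s' invalid for %s" % (value, name))
            return 1
        return 0

    set_sym_val("FOO", "maybe", {"y", "m", "n"}, S_DEF_AUTO)   # silent
    set_sym_val("FOO", "maybe", {"y", "m", "n"}, S_DEF_USER)   # warns
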
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 6c9c45f9fbb..2c3963165a0 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -401,8 +401,8 @@ static void search_conf(void)
struct subtitle_part stpart;
title = str_new();
- str_printf( &title, _("Enter %s (sub)string or regexp to search for "
- "(with or without \"%s\")"), CONFIG_, CONFIG_);
+ str_printf( &title, _("Enter (sub)string or regexp to search for "
+ "(with or without \"%s\")"), CONFIG_);
again:
dialog_clear();
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 7e233a6ca64..c1d53200c30 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -197,12 +197,15 @@ void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep)
void menu_add_option(int token, char *arg)
{
- struct property *prop;
-
switch (token) {
case T_OPT_MODULES:
- prop = prop_alloc(P_DEFAULT, modules_sym);
- prop->expr = expr_alloc_symbol(current_entry->sym);
+ if (modules_sym)
+ zconf_error("symbol '%s' redefines option 'modules'"
+ " already defined by symbol '%s'",
+ current_entry->sym->name,
+ modules_sym->name
+ );
+ modules_sym = current_entry->sym;
break;
case T_OPT_DEFCONFIG_LIST:
if (!sym_defconfig_list)
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 7975d8d258c..4fbecd2473b 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -695,8 +695,8 @@ static void search_conf(void)
int dres;
title = str_new();
- str_printf( &title, _("Enter %s (sub)string or regexp to search for "
- "(with or without \"%s\")"), CONFIG_, CONFIG_);
+ str_printf( &title, _("Enter (sub)string or regexp to search for "
+ "(with or without \"%s\")"), CONFIG_);
again:
dres = dialog_inputbox(main_window,
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index d550300ec00..c9a6775565b 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -136,7 +136,7 @@ static struct property *sym_get_range_prop(struct symbol *sym)
return NULL;
}
-static long sym_get_range_val(struct symbol *sym, int base)
+static long long sym_get_range_val(struct symbol *sym, int base)
{
sym_calc_value(sym);
switch (sym->type) {
@@ -149,13 +149,14 @@ static long sym_get_range_val(struct symbol *sym, int base)
default:
break;
}
- return strtol(sym->curr.val, NULL, base);
+ return strtoll(sym->curr.val, NULL, base);
}
static void sym_validate_range(struct symbol *sym)
{
struct property *prop;
- long base, val, val2;
+ int base;
+ long long val, val2;
char str[64];
switch (sym->type) {
@@ -171,7 +172,7 @@ static void sym_validate_range(struct symbol *sym)
prop = sym_get_range_prop(sym);
if (!prop)
return;
- val = strtol(sym->curr.val, NULL, base);
+ val = strtoll(sym->curr.val, NULL, base);
val2 = sym_get_range_val(prop->expr->left.sym, base);
if (val >= val2) {
val2 = sym_get_range_val(prop->expr->right.sym, base);
@@ -179,9 +180,9 @@ static void sym_validate_range(struct symbol *sym)
return;
}
if (sym->type == S_INT)
- sprintf(str, "%ld", val2);
+ sprintf(str, "%lld", val2);
else
- sprintf(str, "0x%lx", val2);
+ sprintf(str, "0x%llx", val2);
sym->curr.val = strdup(str);
}
@@ -594,7 +595,7 @@ bool sym_string_valid(struct symbol *sym, const char *str)
bool sym_string_within_range(struct symbol *sym, const char *str)
{
struct property *prop;
- long val;
+ long long val;
switch (sym->type) {
case S_STRING:
@@ -605,7 +606,7 @@ bool sym_string_within_range(struct symbol *sym, const char *str)
prop = sym_get_range_prop(sym);
if (!prop)
return true;
- val = strtol(str, NULL, 10);
+ val = strtoll(str, NULL, 10);
return val >= sym_get_range_val(prop->expr->left.sym, 10) &&
val <= sym_get_range_val(prop->expr->right.sym, 10);
case S_HEX:
@@ -614,7 +615,7 @@ bool sym_string_within_range(struct symbol *sym, const char *str)
prop = sym_get_range_prop(sym);
if (!prop)
return true;
- val = strtol(str, NULL, 16);
+ val = strtoll(str, NULL, 16);
return val >= sym_get_range_val(prop->expr->left.sym, 16) &&
val <= sym_get_range_val(prop->expr->right.sym, 16);
case S_BOOLEAN:
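
The strtol-to-strtoll widening above matters on hosts where long is 32 bits: hex range bounds such as 0xffffffffff would otherwise be truncated before the comparison. The check itself is just parse-then-compare; in Python, where integers are arbitrary precision, the same logic needs no widening:

    def string_within_range(s, base, lo, hi):
        val = int(s, base)       # the C side now parses with strtoll()
        return lo <= val <= hi

    # a value that only fits once the C type is 64 bits wide:
    assert string_within_range("0x1ffffffff", 16, 0x0, 0xffffffffff)
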
@@ -963,11 +964,11 @@ struct sym_match {
* - first, symbols that match exactly
* - then, alphabetical sort
*/
-static int sym_rel_comp( const void *sym1, const void *sym2 )
+static int sym_rel_comp(const void *sym1, const void *sym2)
{
- struct sym_match *s1 = *(struct sym_match **)sym1;
- struct sym_match *s2 = *(struct sym_match **)sym2;
- int l1, l2;
+ const struct sym_match *s1 = sym1;
+ const struct sym_match *s2 = sym2;
+ int exact1, exact2;
/* Exact match:
* - if matched length on symbol s1 is the length of that symbol,
@@ -978,11 +979,11 @@ static int sym_rel_comp( const void *sym1, const void *sym2 )
* exactly; if this is the case, we can't decide which comes first,
* and we fallback to sorting alphabetically.
*/
- l1 = s1->eo - s1->so;
- l2 = s2->eo - s2->so;
- if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name))
+ exact1 = (s1->eo - s1->so) == strlen(s1->sym->name);
+ exact2 = (s2->eo - s2->so) == strlen(s2->sym->name);
+ if (exact1 && !exact2)
return -1;
- if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name))
+ if (!exact1 && exact2)
return 1;
/* As a fallback, sort symbols alphabetically */
@@ -992,7 +993,7 @@ static int sym_rel_comp( const void *sym1, const void *sym2 )
struct symbol **sym_re_search(const char *pattern)
{
struct symbol *sym, **sym_arr = NULL;
- struct sym_match **sym_match_arr = NULL;
+ struct sym_match *sym_match_arr = NULL;
int i, cnt, size;
regex_t re;
regmatch_t match[1];
@@ -1005,47 +1006,38 @@ struct symbol **sym_re_search(const char *pattern)
return NULL;
for_all_symbols(i, sym) {
- struct sym_match *tmp_sym_match;
if (sym->flags & SYMBOL_CONST || !sym->name)
continue;
if (regexec(&re, sym->name, 1, match, 0))
continue;
- if (cnt + 1 >= size) {
+ if (cnt >= size) {
void *tmp;
size += 16;
- tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *));
- if (!tmp) {
+ tmp = realloc(sym_match_arr, size * sizeof(struct sym_match));
+ if (!tmp)
goto sym_re_search_free;
- }
sym_match_arr = tmp;
}
sym_calc_value(sym);
- tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match));
- if (!tmp_sym_match)
- goto sym_re_search_free;
- tmp_sym_match->sym = sym;
- /* As regexec return 0, we know we have a match, so
+ /* As regexec returned 0, we know we have a match, so
* we can use match[0].rm_[se]o without further checks
*/
- tmp_sym_match->so = match[0].rm_so;
- tmp_sym_match->eo = match[0].rm_eo;
- sym_match_arr[cnt++] = tmp_sym_match;
+ sym_match_arr[cnt].so = match[0].rm_so;
+ sym_match_arr[cnt].eo = match[0].rm_eo;
+ sym_match_arr[cnt++].sym = sym;
}
if (sym_match_arr) {
- qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp);
+ qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
sym_arr = malloc((cnt+1) * sizeof(struct symbol));
if (!sym_arr)
goto sym_re_search_free;
for (i = 0; i < cnt; i++)
- sym_arr[i] = sym_match_arr[i]->sym;
+ sym_arr[i] = sym_match_arr[i].sym;
sym_arr[cnt] = NULL;
}
sym_re_search_free:
- if (sym_match_arr) {
- for (i = 0; i < cnt; i++)
- free(sym_match_arr[i]);
- free(sym_match_arr);
- }
+ /* sym_match_arr can be NULL if no match, but free(NULL) is OK */
+ free(sym_match_arr);
regfree(&re);
return sym_arr;
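
The sym_re_search() rewrite stores the match records by value in a single growable array instead of an array of individually malloc'd pointers, which both removes a per-match allocation that could fail and lets the qsort comparator take the records directly. The ordering it implements — exact regex matches first, alphabetical otherwise — sketched in Python:

    import re

    def search(pattern, names):
        rx = re.compile(pattern)
        matches = []
        for name in names:
            m = rx.search(name)
            if m:
                exact = (m.end() - m.start()) == len(name)
                matches.append((not exact, name))   # exact sorts first
        return [name for _, name in sorted(matches)]

    print(search("USB", ["USB_SUPPORT", "USB", "EARLY_USB"]))
    # ['USB', 'EARLY_USB', 'USB_SUPPORT']
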
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index f636141e7bf..25ae16ac75c 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,9 +1,8 @@
-/* A Bison parser, made by GNU Bison 2.4.3. */
+/* A Bison parser, made by GNU Bison 2.5. */
-/* Skeleton implementation for Bison's Yacc-like parsers in C
+/* Bison implementation for Yacc-like parsers in C
- Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
- 2009, 2010 Free Software Foundation, Inc.
+ Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -45,7 +44,7 @@
#define YYBISON 1
/* Bison version. */
-#define YYBISON_VERSION "2.4.3"
+#define YYBISON_VERSION "2.5"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
@@ -302,11 +301,11 @@ YYID (yyi)
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
-# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# ifndef _STDLIB_H
-# define _STDLIB_H 1
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
# endif
# endif
# endif
@@ -329,24 +328,24 @@ YYID (yyi)
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
-# if (defined __cplusplus && ! defined _STDLIB_H \
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# ifndef _STDLIB_H
-# define _STDLIB_H 1
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
-# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
-# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
@@ -375,23 +374,7 @@ union yyalloc
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
-/* Copy COUNT objects from FROM to TO. The source and destination do
- not overlap. */
-# ifndef YYCOPY
-# if defined __GNUC__ && 1 < __GNUC__
-# define YYCOPY(To, From, Count) \
- __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-# else
-# define YYCOPY(To, From, Count) \
- do \
- { \
- YYSIZE_T yyi; \
- for (yyi = 0; yyi < (Count); yyi++) \
- (To)[yyi] = (From)[yyi]; \
- } \
- while (YYID (0))
-# endif
-# endif
+# define YYCOPY_NEEDED 1
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
@@ -411,6 +394,26 @@ union yyalloc
#endif
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (YYID (0))
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 11
/* YYLAST -- Last index in YYTABLE. */
@@ -529,18 +532,18 @@ static const yytype_int8 yyrhs[] =
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
- 0, 104, 104, 104, 106, 106, 108, 110, 111, 112,
- 113, 114, 115, 119, 123, 123, 123, 123, 123, 123,
- 123, 123, 127, 128, 129, 130, 131, 132, 136, 137,
- 143, 151, 157, 165, 175, 177, 178, 179, 180, 181,
- 182, 185, 193, 199, 209, 215, 221, 224, 226, 237,
- 238, 243, 252, 257, 265, 268, 270, 271, 272, 273,
- 274, 277, 283, 294, 300, 310, 312, 317, 325, 333,
- 336, 338, 339, 340, 345, 352, 359, 364, 372, 375,
- 377, 378, 379, 382, 390, 397, 404, 410, 417, 419,
- 420, 421, 424, 432, 434, 435, 438, 445, 447, 452,
- 453, 456, 457, 458, 462, 463, 466, 467, 470, 471,
- 472, 473, 474, 475, 476, 479, 480, 483, 484
+ 0, 103, 103, 103, 105, 105, 107, 109, 110, 111,
+ 112, 113, 114, 118, 122, 122, 122, 122, 122, 122,
+ 122, 122, 126, 127, 128, 129, 130, 131, 135, 136,
+ 142, 150, 156, 164, 174, 176, 177, 178, 179, 180,
+ 181, 184, 192, 198, 208, 214, 220, 223, 225, 236,
+ 237, 242, 251, 256, 264, 267, 269, 270, 271, 272,
+ 273, 276, 282, 293, 299, 309, 311, 316, 324, 332,
+ 335, 337, 338, 339, 344, 351, 358, 363, 371, 374,
+ 376, 377, 378, 381, 389, 396, 403, 409, 416, 418,
+ 419, 420, 423, 431, 433, 434, 437, 444, 446, 451,
+ 452, 455, 456, 457, 461, 462, 465, 466, 469, 470,
+ 471, 472, 473, 474, 475, 478, 479, 482, 483
};
#endif
@@ -615,8 +618,8 @@ static const yytype_uint8 yyr2[] =
3, 3, 2, 3, 3, 1, 1, 0, 1
};
-/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
- STATE-NUM when YYTABLE doesn't specify something else to do. Zero
+/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
@@ -691,8 +694,7 @@ static const yytype_int16 yypgoto[] =
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
- number is the opposite. If zero, do what YYDEFACT says.
- If YYTABLE_NINF, syntax error. */
+ number is the opposite. If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -86
static const yytype_int16 yytable[] =
{
@@ -728,6 +730,12 @@ static const yytype_int16 yytable[] =
184
};
+#define yypact_value_is_default(yystate) \
+ ((yystate) == (-90))
+
+#define yytable_value_is_error(yytable_value) \
+ YYID (0)
+
static const yytype_int16 yycheck[] =
{
1, 67, 68, 10, 93, 94, 76, 3, 76, 14,
@@ -821,7 +829,6 @@ do \
{ \
yychar = (Token); \
yylval = (Value); \
- yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
@@ -863,19 +870,10 @@ while (YYID (0))
#endif
-/* YY_LOCATION_PRINT -- Print the location on the stream.
- This macro was not mandated originally: define only if we know
- we won't break user code: when these are the locations we know. */
+/* This macro is provided for backward compatibility. */
#ifndef YY_LOCATION_PRINT
-# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
-# define YY_LOCATION_PRINT(File, Loc) \
- fprintf (File, "%d.%d-%d.%d", \
- (Loc).first_line, (Loc).first_column, \
- (Loc).last_line, (Loc).last_column)
-# else
-# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-# endif
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
#endif
@@ -1067,7 +1065,6 @@ int yydebug;
# define YYMAXDEPTH 10000
#endif
-
#if YYERROR_VERBOSE
@@ -1170,115 +1167,142 @@ yytnamerr (char *yyres, const char *yystr)
}
# endif
-/* Copy into YYRESULT an error message about the unexpected token
- YYCHAR while in state YYSTATE. Return the number of bytes copied,
- including the terminating null byte. If YYRESULT is null, do not
- copy anything; just return the number of bytes that would be
- copied. As a special case, return 0 if an ordinary "syntax error"
- message will do. Return YYSIZE_MAXIMUM if overflow occurs during
- size calculation. */
-static YYSIZE_T
-yysyntax_error (char *yyresult, int yystate, int yychar)
-{
- int yyn = yypact[yystate];
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
- if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
- return 0;
- else
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ YYSIZE_T yysize1;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = 0;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - Assume YYFAIL is not used. It's too flawed to consider. See
+ <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
+ for details. YYERROR is fine as it does not invoke this
+ function.
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
{
- int yytype = YYTRANSLATE (yychar);
- YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
- YYSIZE_T yysize = yysize0;
- YYSIZE_T yysize1;
- int yysize_overflow = 0;
- enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
- char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
- int yyx;
-
-# if 0
- /* This is so xgettext sees the translatable formats that are
- constructed on the fly. */
- YY_("syntax error, unexpected %s");
- YY_("syntax error, unexpected %s, expecting %s");
- YY_("syntax error, unexpected %s, expecting %s or %s");
- YY_("syntax error, unexpected %s, expecting %s or %s or %s");
- YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
-# endif
- char *yyfmt;
- char const *yyf;
- static char const yyunexpected[] = "syntax error, unexpected %s";
- static char const yyexpecting[] = ", expecting %s";
- static char const yyor[] = " or %s";
- char yyformat[sizeof yyunexpected
- + sizeof yyexpecting - 1
- + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
- * (sizeof yyor - 1))];
- char const *yyprefix = yyexpecting;
-
- /* Start YYX at -YYN if negative to avoid negative indexes in
- YYCHECK. */
- int yyxbegin = yyn < 0 ? -yyn : 0;
-
- /* Stay within bounds of both yycheck and yytname. */
- int yychecklim = YYLAST - yyn + 1;
- int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
- int yycount = 1;
-
- yyarg[0] = yytname[yytype];
- yyfmt = yystpcpy (yyformat, yyunexpected);
-
- for (yyx = yyxbegin; yyx < yyxend; ++yyx)
- if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
- {
- if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
- {
- yycount = 1;
- yysize = yysize0;
- yyformat[sizeof yyunexpected - 1] = '\0';
- break;
- }
- yyarg[yycount++] = yytname[yyx];
- yysize1 = yysize + yytnamerr (0, yytname[yyx]);
- yysize_overflow |= (yysize1 < yysize);
- yysize = yysize1;
- yyfmt = yystpcpy (yyfmt, yyprefix);
- yyprefix = yyor;
- }
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+ if (! (yysize <= yysize1
+ && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+ }
+ }
- yyf = YY_(yyformat);
- yysize1 = yysize + yystrlen (yyf);
- yysize_overflow |= (yysize1 < yysize);
- yysize = yysize1;
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
- if (yysize_overflow)
- return YYSIZE_MAXIMUM;
+ yysize1 = yysize + yystrlen (yyformat);
+ if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
- if (yyresult)
- {
- /* Avoid sprintf, as that infringes on the user's name space.
- Don't have undefined behavior even if the translation
- produced a string with the wrong number of "%s"s. */
- char *yyp = yyresult;
- int yyi = 0;
- while ((*yyp = *yyf) != '\0')
- {
- if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
- {
- yyp += yytnamerr (yyp, yyarg[yyi++]);
- yyf += 2;
- }
- else
- {
- yyp++;
- yyf++;
- }
- }
- }
- return yysize;
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
}
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
}
#endif /* YYERROR_VERBOSE */
-
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
@@ -1341,6 +1365,7 @@ yydestruct (yymsg, yytype, yyvaluep)
}
}
+
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
@@ -1367,10 +1392,9 @@ YYSTYPE yylval;
int yynerrs;
-
-/*-------------------------.
-| yyparse or yypush_parse. |
-`-------------------------*/
+/*----------.
+| yyparse. |
+`----------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1394,8 +1418,6 @@ yyparse ()
#endif
#endif
{
-
-
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
@@ -1550,7 +1572,7 @@ yybackup:
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
- if (yyn == YYPACT_NINF)
+ if (yypact_value_is_default (yyn))
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
@@ -1581,8 +1603,8 @@ yybackup:
yyn = yytable[yyn];
if (yyn <= 0)
{
- if (yyn == 0 || yyn == YYTABLE_NINF)
- goto yyerrlab;
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
@@ -1637,34 +1659,34 @@ yyreduce:
{
case 10:
- { zconf_error("unexpected end statement"); ;}
+ { zconf_error("unexpected end statement"); }
break;
case 11:
- { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); ;}
+ { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); }
break;
case 12:
{
zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[(2) - (4)].id)->name);
-;}
+}
break;
case 13:
- { zconf_error("invalid statement"); ;}
+ { zconf_error("invalid statement"); }
break;
case 28:
- { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); ;}
+ { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); }
break;
case 29:
- { zconf_error("invalid option"); ;}
+ { zconf_error("invalid option"); }
break;
case 30:
@@ -1674,7 +1696,7 @@ yyreduce:
sym->flags |= SYMBOL_OPTIONAL;
menu_add_entry(sym);
printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
-;}
+}
break;
case 31:
@@ -1682,7 +1704,7 @@ yyreduce:
{
menu_end_entry();
printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 32:
@@ -1692,7 +1714,7 @@ yyreduce:
sym->flags |= SYMBOL_OPTIONAL;
menu_add_entry(sym);
printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
-;}
+}
break;
case 33:
@@ -1704,7 +1726,7 @@ yyreduce:
zconfprint("warning: menuconfig statement without prompt");
menu_end_entry();
printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 41:
@@ -1714,7 +1736,7 @@ yyreduce:
printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
zconf_curname(), zconf_lineno(),
(yyvsp[(1) - (3)].id)->stype);
-;}
+}
break;
case 42:
@@ -1722,7 +1744,7 @@ yyreduce:
{
menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 43:
@@ -1734,7 +1756,7 @@ yyreduce:
printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
zconf_curname(), zconf_lineno(),
(yyvsp[(1) - (4)].id)->stype);
-;}
+}
break;
case 44:
@@ -1742,7 +1764,7 @@ yyreduce:
{
menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 45:
@@ -1750,7 +1772,7 @@ yyreduce:
{
menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr));
printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 48:
@@ -1762,17 +1784,17 @@ yyreduce:
else
zconfprint("warning: ignoring unknown option %s", (yyvsp[(2) - (3)].string));
free((yyvsp[(2) - (3)].string));
-;}
+}
break;
case 49:
- { (yyval.string) = NULL; ;}
+ { (yyval.string) = NULL; }
break;
case 50:
- { (yyval.string) = (yyvsp[(2) - (2)].string); ;}
+ { (yyval.string) = (yyvsp[(2) - (2)].string); }
break;
case 51:
@@ -1783,14 +1805,14 @@ yyreduce:
menu_add_entry(sym);
menu_add_expr(P_CHOICE, NULL, NULL);
printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 52:
{
(yyval.menu) = menu_add_menu();
-;}
+}
break;
case 53:
@@ -1800,7 +1822,7 @@ yyreduce:
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
}
-;}
+}
break;
case 61:
@@ -1808,7 +1830,7 @@ yyreduce:
{
menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 62:
@@ -1821,7 +1843,7 @@ yyreduce:
(yyvsp[(1) - (3)].id)->stype);
} else
YYERROR;
-;}
+}
break;
case 63:
@@ -1829,7 +1851,7 @@ yyreduce:
{
current_entry->sym->flags |= SYMBOL_OPTIONAL;
printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 64:
@@ -1841,7 +1863,7 @@ yyreduce:
zconf_curname(), zconf_lineno());
} else
YYERROR;
-;}
+}
break;
case 67:
@@ -1851,7 +1873,7 @@ yyreduce:
menu_add_entry(NULL);
menu_add_dep((yyvsp[(2) - (3)].expr));
(yyval.menu) = menu_add_menu();
-;}
+}
break;
case 68:
@@ -1861,14 +1883,14 @@ yyreduce:
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
}
-;}
+}
break;
case 74:
{
menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
-;}
+}
break;
case 75:
@@ -1877,14 +1899,14 @@ yyreduce:
menu_add_entry(NULL);
menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 76:
{
(yyval.menu) = menu_add_menu();
-;}
+}
break;
case 77:
@@ -1894,7 +1916,7 @@ yyreduce:
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
}
-;}
+}
break;
case 83:
@@ -1902,7 +1924,7 @@ yyreduce:
{
printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
zconf_nextfile((yyvsp[(2) - (3)].string));
-;}
+}
break;
case 84:
@@ -1911,14 +1933,14 @@ yyreduce:
menu_add_entry(NULL);
menu_add_prompt(P_COMMENT, (yyvsp[(2) - (3)].string), NULL);
printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 85:
{
menu_end_entry();
-;}
+}
break;
case 86:
@@ -1926,14 +1948,14 @@ yyreduce:
{
printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
zconf_starthelp();
-;}
+}
break;
case 87:
{
current_entry->help = (yyvsp[(2) - (2)].string);
-;}
+}
break;
case 92:
@@ -1941,102 +1963,113 @@ yyreduce:
{
menu_add_dep((yyvsp[(3) - (4)].expr));
printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
-;}
+}
break;
case 96:
{
menu_add_visibility((yyvsp[(2) - (2)].expr));
-;}
+}
break;
case 98:
{
menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr));
-;}
+}
break;
case 101:
- { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
break;
case 102:
- { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
break;
case 103:
- { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
+ { (yyval.id) = (yyvsp[(1) - (2)].id); }
break;
case 106:
- { (yyval.expr) = NULL; ;}
+ { (yyval.expr) = NULL; }
break;
case 107:
- { (yyval.expr) = (yyvsp[(2) - (2)].expr); ;}
+ { (yyval.expr) = (yyvsp[(2) - (2)].expr); }
break;
case 108:
- { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); ;}
+ { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); }
break;
case 109:
- { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
+ { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
break;
case 110:
- { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
+ { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
break;
case 111:
- { (yyval.expr) = (yyvsp[(2) - (3)].expr); ;}
+ { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
break;
case 112:
- { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); ;}
+ { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
break;
case 113:
- { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
+ { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
break;
case 114:
- { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
+ { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
break;
case 115:
- { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); ;}
+ { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
break;
case 116:
- { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;}
+ { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
break;
case 117:
- { (yyval.string) = NULL; ;}
+ { (yyval.string) = NULL; }
break;
default: break;
}
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
@@ -2064,6 +2097,10 @@ yyreduce:
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
@@ -2071,37 +2108,36 @@ yyerrlab:
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
{
- YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
- if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
- {
- YYSIZE_T yyalloc = 2 * yysize;
- if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
- yyalloc = YYSTACK_ALLOC_MAXIMUM;
- if (yymsg != yymsgbuf)
- YYSTACK_FREE (yymsg);
- yymsg = (char *) YYSTACK_ALLOC (yyalloc);
- if (yymsg)
- yymsg_alloc = yyalloc;
- else
- {
- yymsg = yymsgbuf;
- yymsg_alloc = sizeof yymsgbuf;
- }
- }
-
- if (0 < yysize && yysize <= yymsg_alloc)
- {
- (void) yysyntax_error (yymsg, yystate, yychar);
- yyerror (yymsg);
- }
- else
- {
- yyerror (YY_("syntax error"));
- if (yysize != 0)
- goto yyexhaustedlab;
- }
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
}
+# undef YYSYNTAX_ERROR
#endif
}
@@ -2160,7 +2196,7 @@ yyerrlab1:
for (;;)
{
yyn = yypact[yystate];
- if (yyn != YYPACT_NINF)
+ if (!yypact_value_is_default (yyn))
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
@@ -2219,8 +2255,13 @@ yyexhaustedlab:
yyreturn:
if (yychar != YYEMPTY)
- yydestruct ("Cleanup: discarding lookahead",
- yytoken, &yylval);
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ }
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
@@ -2256,9 +2297,6 @@ void conf_parse(const char *name)
sym_init();
_menu_init();
- modules_sym = sym_lookup(NULL, 0);
- modules_sym->type = S_BOOLEAN;
- modules_sym->flags |= SYMBOL_AUTO;
rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
if (getenv("ZCONF_DEBUG"))
@@ -2266,12 +2304,8 @@ void conf_parse(const char *name)
zconfparse();
if (zconfnerrs)
exit(1);
- if (!modules_sym->prop) {
- struct property *prop;
-
- prop = prop_alloc(P_DEFAULT, modules_sym);
- prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
- }
+ if (!modules_sym)
+ modules_sym = sym_find( "n" );
rootmenu.prompt->text = _(rootmenu.prompt->text);
rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
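
Most of the zconf.tab.c_shipped churn is mechanical output from regenerating the parser with Bison 2.5. One behavioral piece worth noting: the verbose syntax-error reporter changes from a write-into-caller-buffer function to one returning 0 (message written), 1 (buffer too small, grow and retry), or 2 (message too large to ever store). A toy sketch of the retry loop that yyerrlab now runs — the sizes are made up:

    def syntax_error(bufsize, needed=96):    # pretend the message needs 96 bytes
        if needed > bufsize:
            return 1                         # caller must grow the buffer
        return 0                             # message formatted

    bufsize = 64
    status = syntax_error(bufsize)
    if status == 1:
        bufsize = 2 * 96                     # like doubling *yymsg_alloc
        status = syntax_error(bufsize)
    if status == 2:
        raise MemoryError("yyexhaustedlab")  # too large: give up on detail
    print(status)                            # 0 after one retry
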
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 864da07ba4a..0653886fac4 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -493,9 +493,6 @@ void conf_parse(const char *name)
sym_init();
_menu_init();
- modules_sym = sym_lookup(NULL, 0);
- modules_sym->type = S_BOOLEAN;
- modules_sym->flags |= SYMBOL_AUTO;
rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
if (getenv("ZCONF_DEBUG"))
@@ -503,12 +500,8 @@ void conf_parse(const char *name)
zconfparse();
if (zconfnerrs)
exit(1);
- if (!modules_sym->prop) {
- struct property *prop;
-
- prop = prop_alloc(P_DEFAULT, modules_sym);
- prop->expr = expr_alloc_symbol(sym_lookup("MODULES", 0));
- }
+ if (!modules_sym)
+ modules_sym = sym_find( "n" );
rootmenu.prompt->text = _(rootmenu.prompt->text);
rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
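
The conf_parse() hunks here and the menu_add_option() hunk in menu.c are two halves of one cleanup: rather than pre-allocating a special modules symbol and attaching a default MODULES property, the parser now records whichever symbol declares `option modules`, rejects a second declaration, and falls back to the constant "n" symbol when no kconfig file declares one. The flow, restated as a small Python sketch with stand-ins for the C globals:

    modules_sym = None                       # set by menu_add_option()

    def add_option_modules(sym):
        global modules_sym
        if modules_sym is not None:
            raise SystemExit("symbol '%s' redefines option 'modules'" % sym)
        modules_sym = sym

    def conf_parse_done():
        global modules_sym
        if modules_sym is None:
            modules_sym = "n"                # sym_find("n"): no module support

    add_option_modules("MODULES")
    conf_parse_done()
    print(modules_sym)                       # MODULES
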