author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 10:17:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 10:17:26 -0700
commit     a09e9a7a4b907f2dfa9bdb2b98a1828ab4b340b2 (patch)
tree       c7a2df4e887573648eeaf8f7939889046990d3f6 /drivers
parent     9ab073bc45b8b523cc39658925bb44bef35ca657 (diff)
parent     86a7e1224a68511d3a1ae0b7e11581b9d37723ae (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm tree changes from Dave Airlie:
 "This is the main drm pull request, I have some overlap with sound and
  arm-soc, the sound patch is acked and may conflict based on -next
  reports but should be a trivial fixup, which I'll leave to you!

  Highlights:

  - new drivers: MSM driver from Rob Clark

  - non-drm: switcheroo and hdmi audio driver support for secondary GPU
    poweroff, so drivers can use runtime PM to poweroff the GPUs.  This
    can save 5 or 6W on some optimus laptops.

  - drm core:
      - combined GEM and TTM VMA manager
      - per-filp mmap permission tracking
      - initial rendernode support (via a runtime enable for now, until
        we get api stable)
      - remove old proc support
      - lots of cleanups of legacy code
      - hdmi vendor infoframes and 4k modes
      - lots of gem/prime locking and races fixes
      - async pageflip scaffolding
      - drm bridge objects

  - i915: Haswell PC8+ support and eLLC support, HDMI 4K support,
    initial per-process VMA pieces, watermark reworks, convert to
    generic hdmi infoframes, encoder reworking, fastboot support

  - radeon: CIK PM support, remove 3d blit code in favour of DMA
    engines, Berlin GPU support, HDMI audio fixes

  - nouveau: secondary GPU power down support for optimus laptops,
    lots of fixes, use MSI, VP3 engine support

  - exynos: runtime pm support for g2d, DT support, remove non-DT

  - tda998x i2c driver: lots of fixes for sync issues

  - gma500: lots of cleanups

  - rcar: add LVDS support, fbdev emulation

  - tegra: just minor fixes"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (684 commits)
  drm/exynos: Fix build error with exynos_drm_connector.c
  drm/exynos: Remove non-DT support in exynos_drm_fimd
  drm/exynos: Remove non-DT support in exynos_hdmi
  drm/exynos: Remove non-DT support in exynos_drm_g2d
  drm/exynos: Remove non-DT support in exynos_hdmiphy
  drm/exynos: Remove non-DT support in exynos_ddc
  drm/exynos: Make Exynos DRM drivers depend on OF
  drm/exynos: Consider fallback option to allocation fail
  drm/exynos: fimd: move platform data parsing to separate function
  drm/exynos: fimd: get signal polarities from device tree
  drm/exynos: fimd: replace struct fb_videomode with videomode
  drm/exynos: check a pixel format to a particular window layer
  drm/exynos: fix fimd pixel format setting
  drm/exynos: Add NULL pointer check
  drm/exynos: Remove redundant error messages
  drm/exynos: Add missing of.h header include
  drm/exynos: Remove redundant NULL check in exynos_drm_buf
  drm/exynos: add device tree support for rotator
  drm/exynos: Add missing includes
  drm/exynos: add runtime pm interfaces to g2d driver
  ...
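For reference, a minimal sketch of the runtime-PM idiom behind the "secondary GPU poweroff" highlight: the driver drops its usage count when idle so the PM core may power the device down, and takes it again before touching hardware. This is illustrative, not code from this merge; only the pm_runtime_* calls are real kernel API.

	#include <linux/pm_runtime.h>

	static void foo_gpu_idle(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* may power the GPU off */
	}

	static int foo_gpu_busy(struct device *dev)
	{
		return pm_runtime_get_sync(dev);	/* powers the GPU back up */
	}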
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 9
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 9
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 51
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 236
-rw-r--r--  drivers/gpu/drm/drm_context.c | 81
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 173
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 89
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 17
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 106
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 306
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_flip_work.c | 124
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 98
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 440
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_info.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 62
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 229
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 58
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 35
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 16
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 190
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 209
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 29
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 73
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 9
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 436
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 37
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 263
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 60
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 53
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 87
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.h | 12
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 57
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 920
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 154
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 89
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 71
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 31
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 39
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 776
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 103
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 38
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 15
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 16
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 65
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 63
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 48
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.h (renamed from drivers/gpu/drm/gma500/psb_intel_display.h) | 13
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 21
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 944
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 44
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 75
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 53
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 485
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 986
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 141
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 322
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 602
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 759
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 191
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 313
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 208
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 1019
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1523
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 150
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 78
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1501
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 518
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 148
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 286
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1155
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 595
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 34
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 30
-rw-r--r--  drivers/gpu/drm/msm/NOTES | 69
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 1438
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 2193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 502
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 432
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 370
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 141
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 254
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 502
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 114
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 48
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 272
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 131
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 508
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 167
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 367
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 281
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 141
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 214
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 50
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4.xml.h | 1061
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_crtc.c | 685
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c | 305
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_format.c | 56
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_irq.c | 203
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.c | 365
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_kms.h | 194
-rw-r--r--  drivers/gpu/drm/msm/mdp4/mdp4_plane.c | 243
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 776
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 213
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 202
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 258
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 597
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 99
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 412
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 463
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 124
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 61
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/printk.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/ramht.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nv98.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/math.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/printk.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/timer.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/os.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/therm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/base.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 284
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioctl.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 74
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 52
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_helpers.c | 169
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 51
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 26
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 3
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 24
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 615
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 5243
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.h | 332
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 3127
-rw-r--r--  drivers/gpu/drm/radeon/cik_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 594
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_cayman.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_ci.h | 944
-rw-r--r--  drivers/gpu/drm/radeon/clearstate_evergreen.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 278
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 536
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 729
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 190
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2645
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.h | 199
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 373
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 338
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/ppsmc.h | 57
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h | 682
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 811
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 785
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 497
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 300
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 113
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 275
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1262
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 119
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 186
-rw-r--r--  drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 88
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 204
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 101
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 844
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 235
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/smu7.h | 170
-rw-r--r--  drivers/gpu/drm/radeon/smu7_discrete.h | 486
-rw-r--r--  drivers/gpu/drm/radeon/smu7_fusion.h | 300
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 436
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c | 165
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v4_2.c | 68
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/rcar-du/Makefile | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 258
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 176
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 63
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 202
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 49
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.c | 187
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.h | 50
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 165
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.h | 29
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.c) | 101
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.h) | 17
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 196
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 46
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 170
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.h | 26
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h | 94
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.c) | 65
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.h) | 15
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 69
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 3
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 8
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 102
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 231
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 63
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 2
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 2
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/host1x/dev.c | 2
-rw-r--r--  drivers/gpu/host1x/dev.h | 2
-rw-r--r--  drivers/gpu/host1x/drm/dc.c | 2
-rw-r--r--  drivers/gpu/host1x/drm/drm.c | 7
-rw-r--r--  drivers/gpu/host1x/drm/gem.c | 16
-rw-r--r--  drivers/gpu/host1x/drm/gem.h | 3
-rw-r--r--  drivers/gpu/host1x/drm/hdmi.c | 27
-rw-r--r--  drivers/gpu/host1x/drm/rgb.c | 14
-rw-r--r--  drivers/gpu/host1x/job.c | 15
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 147
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 5
-rw-r--r--  drivers/staging/imx-drm/ipuv3-crtc.c | 3
-rw-r--r--  drivers/video/hdmi.c | 141
408 files changed, 48576 insertions(+), 16724 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a7c54c84329..955555d6ec8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
select I2C
select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+ bool "Enable preliminary support for prerelease Intel hardware by default"
+ depends on DRM_I915
+ help
+ Choose this option if you have prerelease Intel hardware and want the
+ i915 driver to support it by default. You can enable such support at
+ runtime with the module option i915.preliminary_hw_support=1; this
+ option changes the default for that module option.
+
+ If in doubt, say "N".
+
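(For context, a hedged sketch of how a Kconfig default like this typically feeds a module option; the parameter name is taken from the help text above, not from the actual i915 source.)

	#ifdef CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT
	#define PRELIM_DEFAULT 1	/* Kconfig flips the built-in default */
	#else
	#define PRELIM_DEFAULT 0
	#endif

	static int preliminary_hw_support = PRELIM_DEFAULT;
	module_param(preliminary_hw_support, int, 0600);
	MODULE_PARM_DESC(preliminary_hw_support,
			 "Enable preliminary support for prerelease hardware");

Either way, booting with i915.preliminary_hw_support=1 overrides the built-in default at load time.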
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
@@ -223,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
+
+source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa302..f089adfe70e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+ drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
- drm_rect.o
+ drm_rect.o drm_vma_manager.o drm_flip_work.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_MSM) += msm/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097..32e270dc714 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
.dev_priv_size = 0,
.load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = ast_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9..796dbb212a4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd118..7f6152d374c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
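The removed helper was an identical one-line wrapper in every driver, so the series consolidates it into a shared core helper. Judging from the code deleted above, drm_gem_dumb_destroy() amounts to a sketch like:

	int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
				 uint32_t handle)
	{
		return drm_gem_handle_delete(file, handle);
	}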
int ast_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
ast_dumb_mmap_offset(struct drm_file *file,
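bo->bo.addr_space_offset disappears because TTM now embeds a drm_vma_offset_node from the new drm_vma_manager. A sketch of what the replacement accessor computes, assuming the node stores its start in page units:

	static inline u64 offset_addr_sketch(struct drm_vma_offset_node *node)
	{
		/* the node start is in pages; mmap offsets are in bytes */
		return ((u64)node->vm_node.start) << PAGE_SHIFT;
	}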
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 6e8887fe6c1..32aecb34dbc 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ast_bo *astbo = ast_bo(bo);
+
+ return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
}
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
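This wires up the per-filp mmap permission tracking from the merge highlights: mmap on a buffer's fake offset only succeeds for files that were granted access to its offset node. A hedged sketch of the grant/revoke side, assuming the allow/revoke pair exported by the new drm_vma_manager (the foo_* wrappers are illustrative):

	static int foo_gem_handle_open(struct drm_gem_object *obj, struct file *filp)
	{
		/* grant access: verify_access() above will now pass for filp */
		return drm_vma_node_allow(&obj->vma_node, filp);
	}

	static void foo_gem_handle_close(struct drm_gem_object *obj, struct file *filp)
	{
		/* revoke: later mmap attempts from filp fail */
		drm_vma_node_revoke(&obj->vma_node, filp);
	}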
@@ -321,7 +323,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- astbo->gem.driver_private = NULL;
astbo->bo.bdev = &ast->ttm.bdev;
astbo->bo.bdev->dev_mapping = dev->dev_mapping;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152e..138364d9178 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
- .fasync = drm_fasync,
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
.fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = cirrus_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c..9b0bb9184af 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
int cirrus_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae82777..f130a533a51 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 69fd8f1ac8d..75becdeac07 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
}
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,7 +328,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- cirrusbo->gem.driver_private = NULL;
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed17979..e301d653d97 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
}
/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them, but keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible for cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+ struct drm_agp_mem *entry, *tempe;
+
+ if (!drm_core_has_AGP(dev) || !dev->agp)
+ return;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @agp: AGP head to destroy
+ *
+ * Destroy resources that were previously allocated via drm_agp_init(). The
+ * caller must clean up all AGP resources before calling this. See
+ * drm_agp_clear().
+ *
+ * Call this to destroy AGP heads allocated via drm_agp_init().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+ kfree(agp);
+}
+
+/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b7..471e051d295 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr =
- arch_phys_wc_add(map->offset, map->size);
- }
+ if (map->type == _DRM_FRAME_BUFFER ||
+ (map->flags & _DRM_WRITE_COMBINING)) {
+ map->mtrr =
+ arch_phys_wc_add(map->offset, map->size);
}
if (map->type == _DRM_REGISTERS) {
if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
+ map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev))
- arch_phys_wc_del(map->mtrr);
+ arch_phys_wc_del(map->mtrr);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return 0;
}
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
/**
* Add buffers for DMA transfers (ioctl).
*
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
struct drm_buf_desc *request = data;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
if (request->flags & _DRM_SG_BUFFER)
ret = drm_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
+ ret = -EINVAL;
else
ret = drm_addbufs_pci(dev, request);
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
int i;
int count;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
int order;
struct drm_buf_entry *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
+ order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
int idx;
struct drm_buf *buf;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_buf_map *request = data;
int i;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
if (request->count >= dma->buf_count) {
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
+ && (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
-/**
- * Compute size order. Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+int drm_dma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- int order;
- unsigned long tmp;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+ if (dev->driver->dma_ioctl)
+ return dev->driver->dma_ioctl(dev, data, file_priv);
+ else
+ return -EINVAL;
+}
- if (size & (size - 1))
- ++order;
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
- return order;
+ list_for_each_entry(entry, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
+ }
+ return NULL;
}
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
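The deleted drm_order() and its generic replacement order_base_2() agree for all sizes >= 1: both return the exponent of the smallest power of two that is greater than or equal to size. A standalone demonstration of the shared semantics (plain userspace C, mirroring the loop removed above):

	#include <stdio.h>

	/* ceil(log2(size)): the order both helpers compute */
	static int order_demo(unsigned long size)
	{
		int order = 0;
		unsigned long tmp;

		for (tmp = size >> 1; tmp; tmp >>= 1)
			order++;
		if (size & (size - 1))
			order++;	/* round up when size is not a power of two */
		return order;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       order_demo(4096),	/* 12 */
		       order_demo(4097),	/* 13 */
		       order_demo(1));		/* 0 */
		return 0;
	}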
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d3897..b4fb86d8985 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,10 +42,6 @@
#include <drm/drmP.h>
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
/**
* Free a handle from the context bitmap.
*
@@ -56,13 +52,48 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
+
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos);
+ --dev->ctx_count;
+ }
+ }
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
+}
+
/**
* Context bitmap allocation.
*
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
idr_init(&dev->ctx_idr);
- return 0;
}
/**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
return 0;
}
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return 0;
}
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
/**
* Get context.
*
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fc83bb9eb51..bff2fa941f6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
-static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
- { DRM_MODE_DITHERING_AUTO, "Automatic" },
-};
-
/*
* Non-global properties, but "required" for certain connectors.
*/
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
struct drm_conn_prop_enum_list {
int type;
const char *name;
- int count;
+ struct ida ida;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
- { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { DRM_MODE_CONNECTOR_VGA, "VGA" },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+ { DRM_MODE_CONNECTOR_Composite, "Composite" },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+ { DRM_MODE_CONNECTOR_Component, "Component" },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+ { DRM_MODE_CONNECTOR_TV, "TV" },
+ { DRM_MODE_CONNECTOR_eDP, "eDP" },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
+void drm_connector_ida_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
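The old scheme only ever incremented a per-type counter, so connector type ids grew without bound and were never reclaimed when a connector went away. With an IDA, ida_simple_get() hands out the smallest free id and drm_connector_cleanup() returns it (see the ida_remove() call below). The pattern in miniature, using only IDA calls that appear in this patch:

	#include <linux/idr.h>

	static struct ida example_ida;

	static int example(void)
	{
		int id;

		ida_init(&example_ida);
		/* smallest free id >= 1; an upper bound of 0 means "no limit" */
		id = ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			return id;
		ida_remove(&example_ida, id);	/* id becomes reusable */
		ida_destroy(&example_ida);
		return 0;
	}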
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
-/**
+/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
*
* Remove @mode from @connector's mode list, then free it.
*/
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
list_del(&mode->head);
drm_mode_destroy(connector->dev, mode);
}
-EXPORT_SYMBOL(drm_mode_remove);
/**
* drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
int connector_type)
{
int ret;
+ struct ida *connector_ida =
+ &drm_connector_enum_list[connector_type].ida;
drm_modeset_lock_all(dev);
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
connector->funcs = funcs;
connector->connector_type = connector_type;
connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
+ ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ if (connector->connector_type_id < 0) {
+ ret = connector->connector_type_id;
+ drm_mode_object_put(dev, &connector->base);
+ goto out;
+ }
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
+ ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+ connector->connector_type_id);
+
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -781,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_connector_unplug_all);
+int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+ const struct drm_bridge_funcs *funcs)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
+ if (ret)
+ goto out;
+
+ bridge->dev = dev;
+ bridge->funcs = funcs;
+
+ list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
+ dev->mode_config.num_bridge++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_init);
+
+void drm_bridge_cleanup(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &bridge->base);
+ list_del(&bridge->head);
+ dev->mode_config.num_bridge--;
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_bridge_cleanup);
+
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@@ -1135,30 +1188,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create_enum(dev, 0, "dithering",
- drm_dithering_mode_enum_list,
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
@@ -1190,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
+ total_objects += dev->mode_config.num_bridge;
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
@@ -1198,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
+ group->num_bridges = 0;
return 0;
}
@@ -1207,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct drm_bridge *bridge;
int ret;
if ((ret = drm_mode_group_init(dev, group)))
@@ -1223,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
+ list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors + group->num_bridges++] =
+ bridge->base.id;
+
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
- if (fb->funcs->create_handle)
- ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
- else
+ if (fb->funcs->create_handle) {
+ if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handle);
+ } else {
+ /* GET_FB() is an unprivileged ioctl so we must not
+ * return a buffer-handle to non-master processes! For
+ * backwards-compatibility reasons, we cannot make
+ * GET_FB() privileged, so just return an invalid handle
+ * for non-masters. */
+ r->handle = 0;
+ ret = 0;
+ }
+ } else {
ret = -ENODEV;
+ }
drm_framebuffer_unreference(fb);
@@ -3514,6 +3563,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
return -EINVAL;
@@ -3587,7 +3639,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
}
old_fb = crtc->fb;
- ret = crtc->funcs->page_flip(crtc, fb, e);
+ ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
spin_lock_irqsave(&dev->event_lock, flags);
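
With the check above, DRM_MODE_PAGE_FLIP_ASYNC is only accepted once a driver sets dev->mode_config.async_page_flip, and the flags word is now forwarded to the driver's page_flip hook. A sketch of the userspace side, assuming libdrm's drmModePageFlip() (fd, crtc_id, fb_id and user_data come from earlier setup):

    ret = drmModePageFlip(fd, crtc_id, fb_id,
                          DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
                          user_data);
    if (ret) /* older kernel or driver without async flips: -EINVAL */
            ret = drmModePageFlip(fd, crtc_id, fb_id,
                                  DRM_MODE_PAGE_FLIP_EVENT, user_data);
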
@@ -3905,6 +3957,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.bridge_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -3941,6 +3994,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
+ struct drm_bridge *bridge, *brt;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
@@ -3951,6 +4005,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
encoder->funcs->destroy(encoder);
}
+ list_for_each_entry_safe(bridge, brt,
+ &dev->mode_config.bridge_list, head) {
+ bridge->funcs->destroy(bridge);
+ }
+
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6a647493ca7..c722c3b5404 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
}
/**
@@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+ ret = encoder->bridge->funcs->mode_fixup(
+ encoder->bridge, mode, adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("Bridge fixup failed\n");
+ goto done;
+ }
+ }
+
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
@@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_set)
+ encoder->bridge->funcs->mode_set(encoder->bridge, mode,
+ adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ if (encoder->bridge)
+ encoder->bridge->funcs->pre_enable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
+ if (encoder->bridge)
+ encoder->bridge->funcs->enable(encoder->bridge);
}
/* Store real post-adjustment hardware mode. */
@@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
return dpms;
}
+/* Helper which handles bridge ordering around encoder dpms */
+static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_bridge *bridge = encoder->bridge;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->pre_enable(bridge);
+ else
+ bridge->funcs->disable(bridge);
+ }
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ encoder_funcs->dpms(encoder, mode);
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->enable(bridge);
+ else
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- int old_dpms;
+ int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
@@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
old_dpms = connector->dpms;
connector->dpms = mode;
+ if (encoder)
+ encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
+
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
@@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
@@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
- int ret;
+ int ret, encoder_dpms;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if(encoder->crtc != crtc)
continue;
- encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
+ encoder_dpms = drm_helper_choose_encoder_dpms(
+ encoder);
+
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
}
crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 495b5fd2787..8a140a95375 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -44,10 +44,18 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(struct drm_device *dev)
+int drm_legacy_dma_setup(struct drm_device *dev)
{
int i;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return 0;
+ }
+
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(struct drm_device *dev)
+void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return;
+ }
+
if (!dma)
return;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea..e572dd20bde 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -172,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/UMS drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+ atomic_set(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+}
+
+/**
* Take down the DRM device.
*
* \param dev DRM device structure.
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
+ drm_agp_clear(dev);
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
+ drm_legacy_sg_cleanup(dev);
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
kfree(vma);
}
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
+ drm_legacy_dma_takedown(dev);
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
+ drm_legacy_dev_reinit(dev);
+
DRM_DEBUG("lastclose completed\n");
return 0;
}
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
int ret = -ENOMEM;
drm_global_init();
+ drm_connector_ida_init();
idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
goto err_p2;
}
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
static void __exit drm_core_exit(void)
{
- remove_proc_entry("dri", NULL);
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
+ drm_connector_ida_destroy();
idr_destroy(&drm_minors_idr);
}
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
/* Do not trust userspace, use our own definition */
func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+ (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
+ (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
-
EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
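
The DRM_RENDER_ALLOW additions above pair with the render-node scaffolding from the merge summary: on a render minor, drm_ioctl() now rejects any ioctl not flagged DRM_RENDER_ALLOW, while render clients skip the DRM_AUTH check. A sketch of how a driver opts its submission ioctl in, assuming the usual DRM_IOCTL_DEF_DRV() table convention (MY_SUBMIT and my_submit_ioctl are hypothetical):

    static const struct drm_ioctl_desc my_ioctls[] = {
            /* render clients may submit work but not, say, set modes */
            DRM_IOCTL_DEF_DRV(MY_SUBMIT, my_submit_ioctl,
                              DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
    };
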
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 95d6f4b6967..1688ff50051 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
/*
@@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
.vrefresh = 100, },
};
+/*
+ * HDMI 1.4 4k modes.
+ */
+static const struct drm_display_mode edid_4k_modes[] = {
+ /* 1 - 3840x2160@30Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4016, 4104, 4400, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, },
+ /* 2 - 3840x2160@25Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4896, 4984, 5280, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, },
+ /* 3 - 3840x2160@24Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+ /* 4 - 4096x2160@24Hz (SMPTE) */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 4096, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+};
+
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
return closure.modes;
}
-#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
@@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
-/**
+/*
* Search EDID for CEA extension block.
*/
-u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
-EXPORT_SYMBOL(drm_find_cea_extension);
/*
* Calculate the alternate clock for the CEA mode
@@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);
+/*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+ *
+ * It's almost like cea_mode_alternate_clock(); we just need to add an
+ * exception for the VIC 4 mode (4096x2160@24Hz): there is no alternate clock
+ * for this one.
+ */
+static unsigned int
+hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
+{
+ if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
+ return hdmi_mode->clock;
+
+ return cea_mode_alternate_clock(hdmi_mode);
+}
+
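
As a worked example, assuming cea_mode_alternate_clock() applies the usual NTSC 1000/1001 factor: the 3840x2160@30 entry above has a 297000 kHz pixel clock, so its alternate clock is 297000 * 1000 / 1001 ≈ 296703 kHz, i.e. the 29.97 Hz variant of the same timing. VIC 4 (4096x2160@24, SMPTE) is the one HDMI 1.4 extended mode without such a variant, hence the early return above.
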
+/*
+ * drm_match_hdmi_mode - look for an HDMI mode matching a given mode
+ * @to_match: display mode
+ *
+ * An HDMI mode is one defined in the HDMI vendor specific block.
+ *
+ * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
+ */
+static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
+ const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Make sure to also match alternate clocks */
+ clock1 = hdmi_mode->clock;
+ clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks(to_match, hdmi_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
- const struct drm_display_mode *cea_mode;
+ const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
- u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+ u8 mode_idx = drm_match_cea_mode(mode) - 1;
unsigned int clock1, clock2;
- if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
- continue;
+ if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+ cea_mode = &edid_cea_modes[mode_idx];
+ clock2 = cea_mode_alternate_clock(cea_mode);
+ } else {
+ mode_idx = drm_match_hdmi_mode(mode) - 1;
+ if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+ cea_mode = &edid_4k_modes[mode_idx];
+ clock2 = hdmi_mode_alternate_clock(cea_mode);
+ }
+ }
- cea_mode = &edid_cea_modes[cea_mode_idx];
+ if (!cea_mode)
+ continue;
clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
if (clock1 == clock2)
continue;
@@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
}
static int
-do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
- u8 * mode, cea_mode;
+ const u8 *mode;
+ u8 cea_mode;
int modes = 0;
for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
return modes;
}
+/*
+ * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ * @len: length of the CEA block payload, ie. one can access up to db[len]
+ *
+ * Parses the HDMI VSDB looking for modes to add to @connector.
+ */
+static int
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ int modes = 0, offset = 0, i;
+ u8 vic_len;
+
+ if (len < 8)
+ goto out;
+
+ /* no HDMI_Video_Present */
+ if (!(db[8] & (1 << 5)))
+ goto out;
+
+ /* Latency_Fields_Present */
+ if (db[8] & (1 << 7))
+ offset += 2;
+
+ /* I_Latency_Fields_Present */
+ if (db[8] & (1 << 6))
+ offset += 2;
+
+ /* the declared length is not long enough for the first 2 bytes
+ * of additional video format capabilities */
+ offset += 2;
+ if (len < (8 + offset))
+ goto out;
+
+ vic_len = db[8 + offset] >> 5;
+
+ for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
+ struct drm_display_mode *newmode;
+ u8 vic;
+
+ vic = db[9 + offset + i];
+
+ vic--; /* VICs start at 1 */
+ if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+ DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+ continue;
+ }
+
+ newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+out:
+ return modes;
+}
+
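
To make the offset arithmetic concrete, a sketch of the VSDB of a hypothetical sink, laid out as the parser above sees it (db[0] is the tag/length byte, so db[len] is the last payload byte; byte positions follow the HDMI 1.4 VSDB layout as best recalled here):

    static const u8 example_hdmi_vsdb[] = {
            0x6c,             /* tag 3 (vendor specific), payload len 12 */
            0x03, 0x0c, 0x00, /* IEEE OUI 0x000C03, stored LSB first */
            0x10, 0x00,       /* CEC physical address 1.0.0.0 */
            0x00,             /* feature flags */
            0x3c,             /* Max_TMDS_Clock: 60 * 5 MHz = 300 MHz */
            0x20,             /* db[8]: HDMI_Video_present, no latency fields */
            0x00,             /* 3D fields: none */
            0x40,             /* db[10]: HDMI_VIC_LEN = 2, HDMI_3D_LEN = 0 */
            0x01, 0x04,       /* HDMI VICs: 3840x2160@30 and 4096x2160@24 */
    };
    /* here offset ends up 2, vic_len = db[10] >> 5 = 2, VICs at db[11..12] */
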
static int
cea_db_payload_len(const u8 *db)
{
@@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
return 0;
}
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IEEE_OUI;
+}
+
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
- u8 * cea = drm_find_cea_extension(edid);
- u8 * db, dbl;
+ const u8 *cea = drm_find_cea_extension(edid);
+ const u8 *db;
+ u8 dbl;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2683,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
if (cea_db_tag(db) == VIDEO_BLOCK)
- modes += do_cea_modes (connector, db+1, dbl);
+ modes += do_cea_modes(connector, db + 1, dbl);
+ else if (cea_db_is_hdmi_vsdb(db))
+ modes += do_hdmi_vsdb_modes(connector, db, dbl);
}
}
@@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
-static bool cea_db_is_hdmi_vsdb(const u8 *db)
-{
- int hdmi_id;
-
- if (cea_db_tag(db) != VENDOR_BLOCK)
- return false;
-
- if (cea_db_payload_len(db) < 5)
- return false;
-
- hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
- return hdmi_id == HDMI_IDENTIFIER;
-}
-
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@@ -2732,6 +2885,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
EXPORT_SYMBOL(drm_edid_to_sad);
/**
+ * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
+ * @edid: EDID to parse
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for the CEA EDID block and extracts the Speaker Allocation Data
+ * Block from it. Note: the returned pointer needs to be freed with kfree().
+ *
+ * Returns the size in bytes of the extracted Speaker Allocation Data Block,
+ * or a negative error code on failure.
+ */
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ const u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ const u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == SPEAKER_BLOCK) {
+ dbl = cea_db_payload_len(db);
+
+ /* Speaker Allocation Data Block */
+ if (dbl == 3) {
+ *sadb = kmalloc(dbl, GFP_KERNEL);
+ if (!*sadb)
+ return -ENOMEM;
+ memcpy(*sadb, &db[1], dbl);
+ count = dbl;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
+
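
A sketch of the intended caller pattern; the bit layout of sadb[0] follows the CEA-861 Speaker Allocation Data Block (bit 3 taken here as rear left/right, an assumption worth checking against the spec):

    u8 *sadb = NULL;
    int count = drm_edid_to_speaker_allocation(edid, &sadb);

    if (count == 3) {
            bool has_rear = sadb[0] & (1 << 3); /* RL/RR present */
            /* ... program the audio hardware from the speaker mask ... */
            kfree(sadb); /* the caller owns the returned buffer */
    }
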
+/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
@@ -3102,9 +3307,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (err < 0)
return err;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ frame->pixel_repeat = 1;
+
frame->video_code = drm_match_cea_mode(mode);
- if (!frame->video_code)
- return 0;
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@@ -3112,3 +3318,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+
+/**
+ * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI vendor infoframe
+ * @mode: DRM display mode
+ *
+ * Note that there is a need to send HDMI vendor infoframes only when using a
+ * 4k or stereoscopic 3D mode. So when giving any other mode as input this
+ * function will return -EINVAL, an error that can be safely ignored.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode)
+{
+ int err;
+ u8 vic;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ vic = drm_match_hdmi_mode(mode);
+ if (!vic)
+ return -EINVAL;
+
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->vic = vic;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
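
A sketch of how a driver consumes this, assuming the hdmi_vendor_infoframe_pack() helper from linux/hdmi.h added by the same infoframe rework (my_write_infoframe is hypothetical):

    struct hdmi_vendor_infoframe frame;
    u8 buf[32];
    ssize_t len;

    if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, adjusted_mode) == 0) {
            len = hdmi_vendor_infoframe_pack(&frame, buf, sizeof(buf));
            if (len > 0)
                    my_write_infoframe(encoder, buf, len);
    }
    /* -EINVAL from the helper just means "no vendor infoframe needed" */
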
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c385cc5e730..61b5a47ad23 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
#ifdef CONFIG_DEBUG_FS
-/**
+/*
* drm_fb_cma_describe() - Helper to dump information about a single
* CMA framebuffer object
*/
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
drm_gem_cma_describe(fb_cma->obj[i], m);
}
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
/**
* drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 00000000000..e788882d902
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_flip_work.h"
+
+/**
+ * drm_flip_work_queue - queue work
+ * @work: the flip-work
+ * @val: the value to queue
+ *
+ * Queues work that will later be run (passed back to the drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue(struct drm_flip_work *work, void *val)
+{
+ if (kfifo_put(&work->fifo, (const void **)&val)) {
+ atomic_inc(&work->pending);
+ } else {
+ DRM_ERROR("%s fifo full!\n", work->name);
+ work->func(work, val);
+ }
+}
+EXPORT_SYMBOL(drm_flip_work_queue);
+
+/**
+ * drm_flip_work_commit - commit queued work
+ * @work: the flip-work
+ * @wq: the work-queue to run the queued work on
+ *
+ * Trigger work previously queued by drm_flip_work_queue() to run
+ * on a workqueue. The typical usage would be to queue work (via
+ * drm_flip_work_queue()) at any point (from vblank irq and/or
+ * prior), and then from vblank irq commit the queued work.
+ */
+void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq)
+{
+ uint32_t pending = atomic_read(&work->pending);
+ atomic_add(pending, &work->count);
+ atomic_sub(pending, &work->pending);
+ queue_work(wq, &work->worker);
+}
+EXPORT_SYMBOL(drm_flip_work_commit);
+
+static void flip_worker(struct work_struct *w)
+{
+ struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+ uint32_t count = atomic_read(&work->count);
+ void *val = NULL;
+
+ atomic_sub(count, &work->count);
+
+ while (count--)
+ if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+ work->func(work, val);
+}
+
+/**
+ * drm_flip_work_init - initialize flip-work
+ * @work: the flip-work to initialize
+ * @size: the max queue depth
+ * @name: debug name
+ * @func: the callback work function
+ *
+ * Initializes/allocates resources for the flip-work
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+ const char *name, drm_flip_func_t func)
+{
+ int ret;
+
+ work->name = name;
+ atomic_set(&work->count, 0);
+ atomic_set(&work->pending, 0);
+ work->func = func;
+
+ ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("could not allocate %s fifo\n", name);
+ return ret;
+ }
+
+ INIT_WORK(&work->worker, flip_worker);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_flip_work_init);
+
+/**
+ * drm_flip_work_cleanup - cleans up flip-work
+ * @work: the flip-work to cleanup
+ *
+ * Destroy resources allocated for the flip-work
+ */
+void drm_flip_work_cleanup(struct drm_flip_work *work)
+{
+ WARN_ON(!kfifo_is_empty(&work->fifo));
+ kfifo_free(&work->fifo);
+}
+EXPORT_SYMBOL(drm_flip_work_cleanup);
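
A sketch of the usage pattern this helper is built for: deferring framebuffer release out of the vblank interrupt (the my_* names are hypothetical):

    static void my_unref_fb(struct drm_flip_work *work, void *val)
    {
            drm_framebuffer_unreference(val);
    }

    /* once, at CRTC init: */
    drm_flip_work_init(&my_crtc->unref_work, 16, "fb_unref", my_unref_fb);

    /* when a flip is queued, schedule the outgoing fb for release: */
    drm_flip_work_queue(&my_crtc->unref_work, old_fb);

    /* from the vblank irq, once the flip has latched: */
    drm_flip_work_commit(&my_crtc->unref_work, priv->wq);
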
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e036..4be8e09a32e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(struct drm_device * dev)
{
- int i;
int ret;
- if (dev->driver->firstopen) {
+ if (dev->driver->firstopen &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
+ ret = drm_legacy_dma_setup(dev);
+ if (ret < 0)
+ return ret;
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
return 0;
}
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_prime_destroy;
}
-
- /* if there is no current master make this fd it */
+ /* if there is no current master make this fd it, but do not create
+ * any master object for render clients */
mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
+ if (!priv->minor->master && !drm_is_render_client(priv)) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_close;
}
}
- mutex_unlock(&dev->struct_mutex);
- } else {
+ } else if (!drm_is_render_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
return ret;
}
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
+ drm_legacy_ctxbitmap_release(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
iput(container_of(dev->dev_mapping, struct inode, i_data));
/* drop the reference held by the file priv */
- drm_master_put(&file_priv->master);
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
file_priv->is_master = 0;
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152e..49293bdc972 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
*
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
- spin_lock_init(&dev->object_name_lock);
+ mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
}
dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 12)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ drm_vma_offset_manager_init(&mm->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
return 0;
}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
+ drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+ struct file *filp;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- return PTR_ERR(obj->filp);
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
- kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
- obj->size = size;
+ drm_gem_private_object_init(dev, obj, size);
+ obj->filp = filp;
return 0;
}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
-int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
obj->filp = NULL;
kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
+ obj->handle_count = 0;
obj->size = size;
-
- return 0;
+ drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
- if (obj->import_attach) {
- drm_prime_remove_buf_handle(&filp->prime,
- obj->import_attach->dmabuf);
+ /*
+ * Note: obj->dma_buf can't disappear as long as we still hold a
+ * handle reference in obj->handle_count.
+ */
+ mutex_lock(&filp->prime.lock);
+ if (obj->dma_buf) {
+ drm_prime_remove_buf_handle_locked(&filp->prime,
+ obj->dma_buf);
}
- if (obj->export_dma_buf) {
- drm_prime_remove_buf_handle(&filp->prime,
- obj->export_dma_buf);
+ mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
+ */
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+ /* Unbreak the reference cycle if we have an exported dma_buf. */
+ if (obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (WARN_ON(obj->handle_count == 0))
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ mutex_lock(&obj->dev->object_name_lock);
+ if (--obj->handle_count == 0) {
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_exported_dma_buf_free(obj);
+ }
+ mutex_unlock(&obj->dev->object_name_lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- drm_gem_remove_prime_handles(obj, filp);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, filp);
+ drm_vma_node_revoke(&obj->vma_node, filp->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
*/
int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
struct drm_device *dev = obj->dev;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
/*
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+ drm_gem_object_reference(obj);
+ obj->handle_count++;
spin_unlock(&file_priv->table_lock);
idr_preload_end();
- if (ret < 0)
+ mutex_unlock(&dev->object_name_lock);
+ if (ret < 0) {
+ drm_gem_object_handle_unreference_unlocked(obj);
return ret;
+ }
*handlep = ret;
- drm_gem_object_handle_reference(obj);
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
return 0;
}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ mutex_lock(&obj->dev->object_name_lock);
+
+ return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
EXPORT_SYMBOL(drm_gem_handle_create);
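
For reference, the canonical create-ioctl pattern on top of these helpers: the handle now owns both a regular object reference and an mmap grant via drm_vma_node_allow(), so the creator drops its own reference right away (a sketch; a real driver frees through its gem_free_object hook):

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return -ENOMEM;

    ret = drm_gem_object_init(dev, obj, size);
    if (ret) {
            kfree(obj);
            return ret;
    }

    ret = drm_gem_handle_create(file_priv, obj, &handle);
    /* the handle holds its own reference now; drop the allocation ref */
    drm_gem_object_unreference_unlocked(obj);
    if (ret)
            return ret;
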
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
+ drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
* @obj: obj in question
+ * @size: the virtual size
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
*/
int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
+ return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, false);
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * a driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
}
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
+ return pages;
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
- return 0;
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
- return ret;
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * a driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
}
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
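
The two helpers are meant to be used as a pair around device use of a shmem-backed object (my_obj is a hypothetical driver wrapper around drm_gem_object):

    struct page **pages;

    pages = drm_gem_get_pages(&my_obj->base, GFP_KERNEL);
    if (IS_ERR(pages))
            return PTR_ERR(pages);
    my_obj->pages = pages;

    /* ... build an sg table, map into the GPU, render ... */

    /* release dirty so the contents survive a later swap-out */
    drm_gem_put_pages(&my_obj->base, my_obj->pages, true, false);
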
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
+ mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
- spin_lock(&dev->object_name_lock);
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ ret = -ENOENT;
+ goto err;
+ }
+
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
ret = 0;
err:
- spin_unlock(&dev->object_name_lock);
idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- spin_lock(&dev->object_name_lock);
+ mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
+ if (obj) {
drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
+ } else {
+ mutex_unlock(&dev->object_name_lock);
return -ENOENT;
+ }
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- drm_gem_remove_prime_handles(obj, file_priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
+ WARN_ON(obj->dma_buf);
+
if (obj->filp)
fput(obj->filp);
}
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- *
- * This cannot be the last reference, since the handle holds one too.
- */
- kref_put(&obj->refcount, drm_gem_object_ref_bug);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* the GEM object is not looked up based on its fake offset. To implement the
* DRM mmap operation, drivers should use the drm_gem_mmap() function.
*
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
* NOTE: This function has to be protected with dev->struct_mutex
*
* Return 0 on success or -EINVAL if the object size is smaller than the VMA
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
* Look up the GEM object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_hash_item *hash;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
int ret = 0;
if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+ node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
}
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
-out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
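
With the lookup moved to the vma offset manager, mmap access control is keyed entirely on the drm_file: handle creation calls drm_vma_node_allow(), handle close revokes it, and an mmap from a file that never held a handle fails with -EACCES. A GEM driver just routes mmap through this helper; a sketch of the usual file_operations wiring of this era:

    static const struct file_operations my_driver_fops = {
            .owner = THIS_MODULE,
            .open = drm_open,
            .release = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .mmap = drm_gem_mmap,
            .poll = drm_poll,
            .read = drm_read,
            .llseek = noop_llseek,
    };
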
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 61c1d17f870..6b51bf90df0 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
/*
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
cma_obj = to_drm_gem_cma_obj(gem_obj);
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
return -EINVAL;
}
- *offset = get_gem_mmap_offset(gem_obj);
+ *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_unreference(gem_obj);
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *drm, unsigned int handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
struct drm_device *dev = obj->dev;
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
obj->name, obj->refcount.refcount.counter,
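
With the map_list hash gone, the byte-based fake offset handed to user-space is simply the page-based start of the vma node shifted by PAGE_SHIFT, which is what drm_vma_node_offset_addr() computes. Spelled out as a sketch:

/* Equivalent of drm_vma_node_offset_addr(): page-based start of the
 * node, converted to the byte offset user-space passes to mmap(). */
static inline __u64 my_fake_mmap_offset(struct drm_gem_object *obj)
{
        return (__u64)drm_vma_node_start(&obj->vma_node) << PAGE_SHIFT;
}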
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ceda3f..53298320080 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
- atomic_read(&obj->handle_count),
+ obj->handle_count,
atomic_read(&obj->refcount.refcount));
return 0;
}
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
seq_printf(m, " name size handles refcount\n");
+
+ mutex_lock(&dev->object_name_lock);
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ffd7a7ba70d..07247e2855a 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
- idx = client->idx;
- i = 0;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pid_vnr(pt->pid);
- client->uid = from_kuid_munged(current_user_ns(), pt->uid);
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
+ /*
+	 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+	 * from breaking completely. Userspace tools stop enumerating once they
+	 * get -EINVAL, hence this is the return value we need to hand back when
+	 * no clients are tracked.
+ *
+ * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+ * attempt to figure out whether they're authenticated or not. Since
+	 * that's the only thing they care about, give it to them directly
+ * instead of walking one giant list.
+ */
+ if (client->idx == 0) {
+ client->auth = file_priv->authenticated;
+ client->pid = pid_vnr(file_priv->pid);
+ client->uid = from_kuid_munged(current_user_ns(),
+ file_priv->uid);
+ client->magic = 0;
+ client->iocs = 0;
+
+ return 0;
+ } else {
+ return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
}
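
From user-space, the surviving use of this ioctl is the authentication probe the comment describes: ask for index 0 and look at the auth flag. A hedged sketch of that probe, with error handling elided:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* libva-style probe: the hollowed-out ioctl still answers "am I
 * authenticated?" for idx == 0 and returns -EINVAL for anything else. */
static int drm_fd_is_authenticated(int fd)
{
        struct drm_client client = { .idx = 0 };

        if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
                return 0;
        return client.auth;
}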
/**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
- int i;
+ /* Clear stats to prevent userspace from eating its stack garbage. */
memset(stats, 0, sizeof(*stats));
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
- stats->count = dev->counters;
-
return 0;
}
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
break;
+ case DRM_CAP_ASYNC_PAGE_FLIP:
+ req->value = dev->mode_config.async_page_flip;
+ break;
default:
return -EINVAL;
}
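
The new capability is queried through the usual getcap interface. A user-space sketch, assuming a drm.h that already defines DRM_CAP_ASYNC_PAGE_FLIP:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Returns nonzero if the device advertises async page flips. */
static int supports_async_flip(int fd)
{
        struct drm_get_cap cap = { .capability = DRM_CAP_ASYNC_PAGE_FLIP };

        if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
                return 0;
        return cap.value != 0;
}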
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
retcode = -EINVAL;
goto done;
}
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
}
done:
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 126d50ea181..64e44fad8ae 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
agp_free_memory(handle);
}
-EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return agp_unbind_memory(handle);
}
-EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d..af93cc55259 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
#define MM_UNUSED_TARGET 4
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kzalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kzalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, node_list);
- list_del(&child->node_list);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->node_list, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- struct drm_mm_node *hole, *node;
- unsigned long end = start + size;
+ struct drm_mm_node *hole;
+ unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
+ BUG_ON(node == NULL);
+
+ /* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > start || hole_end < end)
+ if (hole_start > node->start || hole_end < end)
continue;
- node = drm_mm_kmalloc(mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- node->start = start;
- node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
- if (start == hole_start) {
+ if (node->start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
node->hole_follows = 1;
}
- return node;
+ return 0;
}
- WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mm_create_block);
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
- return node;
+ WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+ node->start, node->size);
+ return -ENOSPC;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_reserve_node);
/**
* Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, 0);
+ color, flags);
if (!hole_node)
return -ENOSPC;
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
-{
- return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
- start, end);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
- start, end, 0);
+ start, end, flags);
if (!hole_node)
return -ENOSPC;
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
-{
- return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
/**
* Remove a memory node from the allocator.
*/
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
+ if (WARN_ON(!node->allocated))
+ return;
+
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
- struct drm_mm *mm = node->mm;
-
- drm_mm_remove_node(node);
-
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&node->node_list, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(node);
- spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
return end >= start + size;
}
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_generic);
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
mm->scanned_blocks = 0;
- spin_lock_init(&mm->unused_lock);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct drm_mm_node *entry, *next;
-
- if (WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean. Delaying takedown\n")) {
- return;
- }
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
- list_del(&entry->node_list);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
+ WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
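
Taken together, the drm_mm changes remove all internal node allocation: callers embed struct drm_mm_node in their own objects and either reserve an exact range or ask the allocator to search, choosing the search behaviour via flags. A sketch of both call patterns, assuming a zero-initialized node; my_buffer is a hypothetical driver object:

#include <drm/drm_mm.h>

struct my_buffer {
        struct drm_mm_node node;        /* embedded; no kmalloc inside drm_mm */
};

/* Reserve a fixed range, e.g. for a buffer the firmware already scans
 * out. Returns -ENOSPC if no hole covers [start, start + size). */
static int my_buffer_pin_fixed(struct drm_mm *mm, struct my_buffer *buf,
                               unsigned long start, unsigned long size)
{
        buf->node.start = start;
        buf->node.size = size;
        return drm_mm_reserve_node(mm, &buf->node);
}

/* Let the allocator search; DRM_MM_SEARCH_BEST would pick the smallest
 * fitting hole instead of the first one found. */
static int my_buffer_pin_anywhere(struct drm_mm *mm, struct my_buffer *buf,
                                  unsigned long size)
{
        return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
                                          DRM_MM_SEARCH_DEFAULT);
}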
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a6729bfe686..fc2adb62b75 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -596,27 +596,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_set_name);
/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
-/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
@@ -923,43 +902,6 @@ void drm_mode_validate_size(struct drm_device *dev,
EXPORT_SYMBOL(drm_mode_validate_size);
/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b2980..1f96cee6eee 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
-#if 1
unsigned long addr;
size_t sz;
-#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
-#if 1
unsigned long addr;
size_t sz;
-#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
}
}
return 0;
}
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev) && dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_agp_clear(dev);
+ drm_agp_destroy(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
+ .agp_destroy = drm_pci_agp_destroy,
};
/**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
goto err_g2;
}
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g21;
+ }
+
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
err_g4:
drm_put_minor(&dev->primary);
err_g3:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g21:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g2:
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index b8a282ea875..f7a18c6ba4c 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -28,7 +28,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
-/**
+/*
* Register.
*
 * \param platdev - Platform device structure
@@ -39,8 +39,8 @@
* Try and register, if we fail to register, backout previous work.
*/
-int drm_get_platform_dev(struct platform_device *platdev,
- struct drm_driver *driver)
+static int drm_get_platform_dev(struct platform_device *platdev,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
goto err_g1;
}
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g11;
+ }
+
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
err_g3:
drm_put_minor(&dev->primary);
err_g2:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g11:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g1:
@@ -107,7 +116,6 @@ err_g1:
mutex_unlock(&drm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_get_platform_dev);
static int drm_platform_get_irq(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e3241..276d470f7b3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
return 0;
}
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->handle == handle)
+ return member->dma_buf;
+ }
+
+ return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf,
+ uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
static int drm_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
attach->priv = NULL;
}
-static void drm_prime_remove_buf_handle_locked(
- struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf)
{
struct drm_prime_member *member, *safe;
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- mutex_lock(&obj->dev->struct_mutex);
-
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
}
- mutex_unlock(&obj->dev->struct_mutex);
return sgt;
}
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* nothing to be done here */
}
-static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- if (obj->export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(obj);
- }
+ /* drop the reference on the export fd holds */
+ drm_gem_object_unreference_unlocked(obj);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ uint32_t flags)
+{
+ struct dma_buf *dmabuf;
+
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ dmabuf = ERR_PTR(-ENOENT);
+ return dmabuf;
+ }
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ return dmabuf;
+ }
+
+ /*
+ * Note that callers do not need to clean up the export cache
+ * since the check for obj->handle_count guarantees that someone
+ * will clean it up.
+ */
+ obj->dma_buf = dmabuf;
+ get_dma_buf(obj->dma_buf);
+	/* Grab a new ref since the caller's reference is now used by the dma-buf */
+ drm_gem_object_reference(obj);
+
+ return dmabuf;
+}
+
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct drm_gem_object *obj;
- void *buf;
int ret = 0;
struct dma_buf *dmabuf;
+ mutex_lock(&file_priv->prime.lock);
obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
- mutex_lock(&file_priv->prime.lock);
+ dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+ if (dmabuf) {
+ get_dma_buf(dmabuf);
+ goto out_have_handle;
+ }
+
+ mutex_lock(&dev->object_name_lock);
/* re-export the original imported object */
if (obj->import_attach) {
dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
goto out_have_obj;
}
- if (obj->export_dma_buf) {
- dmabuf = obj->export_dma_buf;
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
+ dmabuf = obj->dma_buf;
goto out_have_obj;
}
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
+ dmabuf = export_and_register_object(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
- ret = PTR_ERR(buf);
+ ret = PTR_ERR(dmabuf);
+ mutex_unlock(&dev->object_name_lock);
goto out;
}
- obj->export_dma_buf = buf;
- /* if we've exported this buffer the cheat and add it to the import list
- * so we get the correct handle back
+out_have_obj:
+ /*
+ * If we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back. We must do this under the
+ * protection of dev->object_name_lock to ensure that a racing gem close
+	 * ioctl doesn't fail to remove this buffer handle from the cache.
*/
ret = drm_prime_add_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
+ dmabuf, handle);
+ mutex_unlock(&dev->object_name_lock);
if (ret)
goto fail_put_dmabuf;
- ret = dma_buf_fd(buf, flags);
- if (ret < 0)
- goto fail_rm_handle;
-
- *prime_fd = ret;
- mutex_unlock(&file_priv->prime.lock);
- return 0;
-
-out_have_obj:
- get_dma_buf(dmabuf);
+out_have_handle:
ret = dma_buf_fd(dmabuf, flags);
+ /*
+ * We must _not_ remove the buffer from the handle cache since the newly
+ * created dma buf is already linked in the global obj->dma_buf pointer,
+ * and that is invariant as long as a userspace gem handle exists.
+ * Closing the handle will clean out the cache anyway, so we don't leak.
+ */
if (ret < 0) {
- dma_buf_put(dmabuf);
+ goto fail_put_dmabuf;
} else {
*prime_fd = ret;
ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
goto out;
-fail_rm_handle:
- drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
fail_put_dmabuf:
- /* clear NOT to be checked when releasing dma_buf */
- obj->export_dma_buf = NULL;
- dma_buf_put(buf);
+ dma_buf_put(dmabuf);
out:
drm_gem_object_unreference_unlocked(obj);
+out_unlock:
mutex_unlock(&file_priv->prime.lock);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
- if (!ret) {
- ret = 0;
+ if (ret == 0)
goto out_put;
- }
/* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
obj = dev->driver->gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto out_put;
+ goto out_unlock;
}
- ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
goto out_put;
@@ -478,7 +550,9 @@ fail:
/* hmm, if driver attached, we are relying on the free-object path
* to detach.. which seems ok..
*/
- drm_gem_object_handle_unreference_unlocked(obj);
+ drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
out_put:
dma_buf_put(dma_buf);
mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
-
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
-{
- struct drm_prime_member *member;
-
- list_for_each_entry(member, &prime_fpriv->head, entry) {
- if (member->dma_buf == dma_buf) {
- *handle = member->handle;
- return 0;
- }
- }
- return -ENOENT;
-}
-EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
-{
- mutex_lock(&prime_fpriv->lock);
- drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
- mutex_unlock(&prime_fpriv->lock);
-}
-EXPORT_SYMBOL(drm_prime_remove_buf_handle);
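
The reworked export path means user-space can export the same handle repeatedly and always ends up with file descriptors backed by a single cached dma-buf. The ioctl side of that, as a sketch with error handling elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf fd; exporting the same handle twice
 * now reuses the dma-buf cached in obj->dma_buf. */
static int gem_handle_to_fd(int fd, uint32_t handle)
{
        struct drm_prime_handle args = {
                .handle = handle,
                .flags = DRM_CLOEXEC,
        };

        if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
                return -1;
        return args.fd;
}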
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d7f2324b4fb..00000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <drm/drmP.h>
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static const struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE_DATA(inode);
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * gdm_proc_lists in the given root directory.
- */
-static int drm_proc_create_files(const struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (!tmp)
- return -1;
-
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
- minor->index, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- return -1;
- }
- }
- return 0;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param dev DRM device
- * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- char name[12];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%u", minor->index);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_subtree(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- return 0;
-}
-
-static int drm_proc_remove_files(const struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor number.
- * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_subtree(name, root);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc33..1c78406f6e7 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
#endif
}
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
kfree(entry);
}
+void drm_legacy_sg_cleanup(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_sg_cleanup(dev->sg);
+ dev->sg = NULL;
+ }
+}
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
+ struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
return -ENOMEM;
}
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda8..e7eb0276f7f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,6 +40,9 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
+EXPORT_SYMBOL(drm_rnodes);
+
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
int base = 0, limit = 63;
if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
+ base += 64;
+ limit = base + 63;
+ } else if (type == DRM_MINOR_RENDER) {
+ base += 128;
+ limit = base + 63;
+ }
mutex_lock(&dev->struct_mutex);
ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
goto error_out_unreg;
}
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
+ drm_legacy_ctxbitmap_init(dev);
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
* \param sec-minor structure to hold the assigned minor
* \return negative number on failure.
*
- * Search an empty entry and initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routines assigns
- * minor numbers to secondary heads of multi-headed cards
+ * Search an empty entry and initialize it to the given parameters. This
+ * routine assigns minor numbers to secondary heads of multi-headed cards.
*/
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
idr_replace(&drm_minors_idr, new_minor, minor_id);
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
-
#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
+ goto err_mem;
}
#endif
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
- goto err_g2;
+ goto err_debugfs;
}
*minor = new_minor;
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
return 0;
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
+err_debugfs:
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_cleanup(new_minor);
err_mem:
+#endif
kfree(new_minor);
err_idr:
idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
*
* \param sec_minor - structure to be released
* \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
*/
int drm_put_minor(struct drm_minor **minor_p)
{
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
DRM_DEBUG("release secondary minor %d\n", minor->index);
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
drm_lastclose(dev);
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
- arch_phys_wc_del(dev->agp->agp_mtrr);
-
if (dev->driver->unload)
dev->driver->unload(dev);
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
drm_vblank_cleanup(dev);
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
- drm_ctxbitmap_cleanup(dev);
+ drm_legacy_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
+ if (dev->render)
+ drm_put_minor(&dev->render);
+
if (driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
/* for a USB device */
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_unplug_minor(dev->control);
+ if (dev->render)
+ drm_unplug_minor(dev->render);
drm_unplug_minor(dev->primary);
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 34a156f0c33..87664723b9c 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
if (ret)
goto err_g1;
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g11;
+ }
+
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
err_g3:
drm_put_minor(&dev->primary);
err_g2:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g11:
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
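
With drm.rnodes=1 and a driver that sets DRIVER_RENDER, each of the PCI, platform and USB paths above registers an extra render minor. User-space can then open the render node directly; the sketch below assumes the default udev naming for the first render minor (base 128), and real code would enumerate /dev/dri rather than hard-code the name:

#include <fcntl.h>

/* Open the first render node of the experimental render-nodes API. */
static int open_render_node(void)
{
        return open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
}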
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index feb20035b2c..b5c5af7328d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev))
- arch_phys_wc_del(map->mtrr);
+ arch_phys_wc_del(map->mtrr);
iounmap(map->handle);
break;
case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 00000000000..63b47120507
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care to not overlap regions, size them appropriately and to not
+ * confuse mm-core by inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
+ */
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * to the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size)
+{
+ rwlock_init(&mgr->vm_lock);
+ mgr->vm_addr_space_rb = RB_ROOT;
+ drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+ /* take the lock to protect against buggy drivers */
+ write_lock(&mgr->vm_lock);
+ drm_mm_takedown(&mgr->vm_addr_space_mm);
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ read_lock(&mgr->vm_lock);
+ node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ read_unlock(&mgr->vm_lock);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+
+ iter = mgr->vm_addr_space_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ /* verify that the node spans the requested area */
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset < start + pages)
+ best = NULL;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages)
+{
+ int ret;
+
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+ pages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto out_unlock;
+
+ _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+ drm_mm_remove_node(&node->vm_node);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct rb_node **iter;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_file *new, *entry;
+ int ret = 0;
+
+ /* Preallocate entry to avoid atomic allocations below. It is quite
+ * unlikely that an open-file is added twice to a single node so we
+ * don't optimize for this case. OOM is checked below only if the entry
+ * is actually used. */
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ write_lock(&node->vm_lock);
+
+ iter = &node->vm_files.rb_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+ if (filp == entry->vm_filp) {
+ entry->vm_count++;
+ goto unlock;
+ } else if (filp > entry->vm_filp) {
+ iter = &(*iter)->rb_right;
+ } else {
+ iter = &(*iter)->rb_left;
+ }
+ }
+
+ if (!new) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ new->vm_filp = filp;
+ new->vm_count = 1;
+ rb_link_node(&new->vm_rb, parent, iter);
+ rb_insert_color(&new->vm_rb, &node->vm_files);
+ new = NULL;
+
+unlock:
+ write_unlock(&node->vm_lock);
+ kfree(new);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
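The allocate-before-locking pattern above is worth calling out; a minimal
sketch of its general shape, where my_tree, already_present() and insert() are
purely illustrative:

	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* may be NULL */
	write_lock(&lock);
	if (!already_present(my_tree, key)) {
		if (!new) {
			ret = -ENOMEM;
			goto unlock;
		}
		insert(my_tree, key, new);
		new = NULL;		/* ownership moved to the tree */
	}
unlock:
	write_unlock(&lock);
	kfree(new);			/* frees only the unused entry */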
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ write_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp) {
+ if (!--entry->vm_count) {
+ rb_erase(&entry->vm_rb, &node->vm_files);
+ kfree(entry);
+ }
+ break;
+ } else if (filp > entry->vm_filp) {
+ iter = iter->rb_right;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search the list in @node to check whether @filp is currently on the list of
+ * allowed open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ read_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp)
+ break;
+ else if (filp > entry->vm_filp)
+ iter = iter->rb_right;
+ else
+ iter = iter->rb_left;
+ }
+
+ read_unlock(&node->vm_lock);
+
+ return iter;
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);
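A sketch of the per-open-file access control these three helpers enable; the
grant/revoke call sites and the -EACCES policy are assumptions about how a
driver would wire them up, not part of this patch:

	/* Grant on handle creation, revoke on handle/file close,
	 * gate mmap on membership.
	 */
	static int my_bo_grant(struct my_bo *bo, struct file *filp)
	{
		return drm_vma_node_allow(&bo->vma_node, filp);
	}

	static void my_bo_revoke(struct my_bo *bo, struct file *filp)
	{
		/* no-op if @filp was never allowed */
		drm_vma_node_revoke(&bo->vma_node, filp);
	}

	static int my_bo_mmap_check(struct my_bo *bo, struct file *filp)
	{
		return drm_vma_node_is_allowed(&bo->vma_node, filp) ?
			0 : -EACCES;
	}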
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 772c62a6e2a..4752f223e5b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,11 +1,12 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
- depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+ depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
select FB_MODE_HELPERS
- select VIDEOMODE_HELPERS
help
Choose this option if you want to use Exynos FIMD for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 30ef41bcd7b..6a8c84e7c83 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-
+#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
@@ -41,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id ddc_idtable[] = {
- {"s5p_ddc", 0},
- {"exynos5-hdmiddc", 0},
- { },
-};
-
-#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -57,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
-#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(hdmiddc_match_types),
+ .of_match_table = hdmiddc_match_types,
},
- .id_table = ddc_idtable,
.probe = s5p_ddc_probe,
.remove = s5p_ddc_remove,
.command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index b8ac06d92fb..3445a0f3a6b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
DRM_DEBUG_KMS("desired size = 0x%x\n", size);
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ if (!buffer)
return NULL;
- }
buffer->size = size;
return buffer;
@@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer)
{
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null.\n");
- return;
- }
-
kfree(buffer);
buffer = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 02a8bc5226c..e082efb2fec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -17,6 +17,7 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
@@ -28,35 +29,6 @@ struct exynos_drm_connector {
uint32_t dpms;
};
-/* convert exynos_video_timings to drm_display_mode */
-static inline void
-convert_to_display_mode(struct drm_display_mode *mode,
- struct exynos_drm_panel_info *panel)
-{
- struct fb_videomode *timing = &panel->timing;
-
- mode->clock = timing->pixclock / 1000;
- mode->vrefresh = timing->refresh;
-
- mode->hdisplay = timing->xres;
- mode->hsync_start = mode->hdisplay + timing->right_margin;
- mode->hsync_end = mode->hsync_start + timing->hsync_len;
- mode->htotal = mode->hsync_end + timing->left_margin;
-
- mode->vdisplay = timing->yres;
- mode->vsync_start = mode->vdisplay + timing->lower_margin;
- mode->vsync_end = mode->vsync_start + timing->vsync_len;
- mode->vtotal = mode->vsync_end + timing->upper_margin;
- mode->width_mm = panel->width_mm;
- mode->height_mm = panel->height_mm;
-
- if (timing->vmode & FB_VMODE_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timing->vmode & FB_VMODE_DOUBLE)
- mode->flags |= DRM_MODE_FLAG_DBLSCAN;
-}
-
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- convert_to_display_mode(mode, panel);
+ drm_display_mode_from_videomode(&panel->vm, mode);
+ mode->width_mm = panel->width_mm;
+ mode->height_mm = panel->height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
int err;
exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
- if (!exynos_connector) {
- DRM_ERROR("failed to allocate connector\n");
+ if (!exynos_connector)
return NULL;
- }
connector = &exynos_connector->drm_connector;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9a35d171a6d..ebc01503d50 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
@@ -184,8 +185,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
};
static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -323,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
struct drm_crtc *crtc;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
- if (!exynos_crtc) {
- DRM_ERROR("failed to allocate exynos crtc\n");
+ if (!exynos_crtc)
return -ENOMEM;
- }
exynos_crtc->pipe = nr;
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e0cbd..59827cc5e77 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -11,6 +11,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
+#include "exynos_drm_dmabuf.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
@@ -22,6 +23,11 @@ struct exynos_drm_dmabuf_attachment {
bool is_mapped;
};
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_exynos_gem_obj(buf->priv);
+}
+
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
struct device *dev,
struct dma_buf_attachment *attach)
@@ -63,7 +69,7 @@ static struct sg_table *
enum dma_data_direction dir)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr;
@@ -127,27 +133,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* Nothing to do. */
}
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
- /*
- * exynos_dmabuf_release() call means that file object's
- * f_count is 0 and it calls drm_gem_object_handle_unreference()
- * to drop the references that these values had been increased
- * at drm_prime_handle_to_fd()
- */
- if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
- exynos_gem_obj->base.export_dma_buf = NULL;
-
- /*
- * drop this gem object refcount to release allocated buffer
- * and resources.
- */
- drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
- }
-}
-
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -193,7 +178,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
.mmap = exynos_gem_dmabuf_mmap,
- .release = exynos_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +186,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ return dma_buf_export(obj, &exynos_dmabuf_ops,
exynos_gem_obj->base.size, flags);
}
@@ -219,8 +204,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
- exynos_gem_obj = dma_buf->priv;
- obj = &exynos_gem_obj->base;
+ obj = dma_buf->priv;
/* is it from our device? */
if (obj->dev == drm_dev) {
@@ -247,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
- DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a8512..bb82ef78ca8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
int nr;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
- if (!private) {
- DRM_ERROR("failed to allocate private\n");
+ if (!private)
return -ENOMEM;
- }
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
@@ -213,7 +211,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.close = drm_gem_vm_close,
};
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +269,13 @@ static struct drm_driver exynos_drm_driver = {
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = exynos_dmabuf_prime_export,
.gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
+ .num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -288,7 +287,6 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index a99a033793b..06f1b2a09da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
- if (!exynos_encoder) {
- DRM_ERROR("failed to allocate encoder\n");
+ if (!exynos_encoder)
return NULL;
- }
exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
exynos_encoder->manager = manager;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c2d149f0408..ea39e0ef2ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
}
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ if (!exynos_fb)
return ERR_PTR(-ENOMEM);
- }
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
int i, ret;
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ if (!exynos_fb)
return ERR_PTR(-ENOMEM);
- }
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!obj) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8e60bd61137..78e868bcf1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,9 +16,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
@@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- /* 0 means to allocate physically continuous memory */
- exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+ /*
+ * If physically contiguous memory allocation fails and IOMMU is
+ * supported, try to get the buffer from a non-contiguous memory area.
+ */
+ if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
+ size);
+ }
+
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_release_framebuffer;
@@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev) {
- DRM_ERROR("failed to allocate drm fbdev.\n");
+ if (!fbdev)
return -ENOMEM;
- }
private->fb_helper = helper = &fbdev->drm_fb_helper;
helper->funcs = &exynos_drm_fb_helper_funcs;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6e047bd53e2..8adfc8f1e08 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,10 +17,12 @@
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_fimc.h"
@@ -1343,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 1c263dac3c1..868a14d5299 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -16,10 +16,12 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
#include <video/samsung_fimd.h>
#include <drm/exynos_drm.h>
@@ -35,6 +37,8 @@
* CPU Interface.
*/
+#define FIMD_DEFAULT_FRAMERATE 60
+
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
@@ -65,11 +69,13 @@ struct fimd_driver_data {
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
+ unsigned int has_limited_fmt:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.timing_base = 0x0,
.has_clksel = 1,
+ .has_limited_fmt = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -90,6 +96,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
+ unsigned int pixel_format;
dma_addr_t dma_addr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -115,11 +122,10 @@ struct fimd_context {
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
- struct exynos_drm_panel_info *panel;
+ struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
};
-#ifdef CONFIG_OF
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
@@ -129,21 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
-#endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
-#ifdef CONFIG_OF
const struct of_device_id *of_id =
of_match_device(fimd_driver_dt_match, &pdev->dev);
- if (of_id)
- return (struct fimd_driver_data *)of_id->data;
-#endif
-
- return (struct fimd_driver_data *)
- platform_get_device_id(pdev)->driver_data;
+ return (struct fimd_driver_data *)of_id->data;
}
static bool fimd_display_is_connected(struct device *dev)
@@ -157,7 +156,7 @@ static void *fimd_get_panel(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- return ctx->panel;
+ return &ctx->panel;
}
static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -237,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev)
static void fimd_commit(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
+ struct exynos_drm_panel_info *panel = &ctx->panel;
+ struct videomode *vm = &panel->vm;
struct fimd_driver_data *driver_data;
u32 val;
@@ -250,22 +249,22 @@ static void fimd_commit(struct device *dev)
writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
/* setup vertical timing values. */
- val = VIDTCON0_VBPD(timing->upper_margin - 1) |
- VIDTCON0_VFPD(timing->lower_margin - 1) |
- VIDTCON0_VSPW(timing->vsync_len - 1);
+ val = VIDTCON0_VBPD(vm->vback_porch - 1) |
+ VIDTCON0_VFPD(vm->vfront_porch - 1) |
+ VIDTCON0_VSPW(vm->vsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
/* setup horizontal timing values. */
- val = VIDTCON1_HBPD(timing->left_margin - 1) |
- VIDTCON1_HFPD(timing->right_margin - 1) |
- VIDTCON1_HSPW(timing->hsync_len - 1);
+ val = VIDTCON1_HBPD(vm->hback_porch - 1) |
+ VIDTCON1_HFPD(vm->hfront_porch - 1) |
+ VIDTCON1_HSPW(vm->hsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
/* setup horizontal and vertical display size. */
- val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1) |
- VIDTCON2_LINEVAL_E(timing->yres - 1) |
- VIDTCON2_HOZVAL_E(timing->xres - 1);
+ val = VIDTCON2_LINEVAL(vm->vactive - 1) |
+ VIDTCON2_HOZVAL(vm->hactive - 1) |
+ VIDTCON2_LINEVAL_E(vm->vactive - 1) |
+ VIDTCON2_HOZVAL_E(vm->hactive - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -396,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
win_data->bpp = overlay->bpp;
+ win_data->pixel_format = overlay->pixel_format;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -417,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
val = WINCONx_ENWIN;
- switch (win_data->bpp) {
- case 1:
- val |= WINCON0_BPPMODE_1BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_4WORD;
- break;
- case 2:
- val |= WINCON0_BPPMODE_2BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 4:
- val |= WINCON0_BPPMODE_4BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 8:
+ /*
+ * On s3c64xx, window 0 does not support an alpha channel, so if the
+ * requested format is ARGB8888, change it to XRGB8888.
+ */
+ if (ctx->driver_data->has_limited_fmt && !win) {
+ if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
+ win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ }
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
val |= WINCONx_BYTSWP;
break;
- case 16:
+ case DRM_FORMAT_XRGB1555:
+ val |= WINCON0_BPPMODE_16BPP_1555;
+ val |= WINCONx_HAWSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_RGB565:
val |= WINCON0_BPPMODE_16BPP_565;
val |= WINCONx_HAWSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- case 24:
+ case DRM_FORMAT_XRGB8888:
val |= WINCON0_BPPMODE_24BPP_888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- case 32:
- val |= WINCON1_BPPMODE_28BPP_A4888
+ case DRM_FORMAT_ARGB8888:
+ val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
@@ -746,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
drm_iommu_detach_device(drm_dev, dev);
}
-static int fimd_calc_clkdiv(struct fimd_context *ctx,
- struct fb_videomode *timing)
+static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
{
- unsigned long clk = clk_get_rate(ctx->lcd_clk);
- u32 retrace;
- u32 clkdiv;
- u32 best_framerate = 0;
- u32 framerate;
-
- retrace = timing->left_margin + timing->hsync_len +
- timing->right_margin + timing->xres;
- retrace *= timing->upper_margin + timing->vsync_len +
- timing->lower_margin + timing->yres;
-
- /* default framerate is 60Hz */
- if (!timing->refresh)
- timing->refresh = 60;
-
- clk /= retrace;
-
- for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
- int tmp;
-
- /* get best framerate */
- framerate = clk / clkdiv;
- tmp = timing->refresh - framerate;
- if (tmp < 0) {
- best_framerate = framerate;
- continue;
- } else {
- if (!best_framerate)
- best_framerate = framerate;
- else if (tmp < (best_framerate - framerate))
- best_framerate = framerate;
- break;
+ struct videomode *vm = &ctx->panel.vm;
+ unsigned long clk;
+
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
+ if (IS_ERR(ctx->bus_clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(ctx->bus_clk);
+ }
+
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+ if (IS_ERR(ctx->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ return PTR_ERR(ctx->lcd_clk);
+ }
+
+ clk = clk_get_rate(ctx->lcd_clk);
+ if (clk == 0) {
+ dev_err(dev, "error getting sclk_fimd clock rate\n");
+ return -EINVAL;
+ }
+
+ if (vm->pixelclock == 0) {
+ unsigned long c;
+ c = vm->hactive + vm->hback_porch + vm->hfront_porch +
+ vm->hsync_len;
+ c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
+ vm->vsync_len;
+ vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
+ if (vm->pixelclock == 0) {
+ dev_err(dev, "incorrect display timings\n");
+ return -EINVAL;
}
+ dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
+ vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
}
+ ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
+ if (ctx->clkdiv > 256) {
+ dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
+ ctx->clkdiv);
+ ctx->clkdiv = 256;
+ }
+ vm->pixelclock = clk / ctx->clkdiv;
+ DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
+ ctx->clkdiv);
- return clkdiv;
+ return 0;
}
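The divider math above is easier to follow with numbers; a worked example,
assuming hypothetical panel timings and a hypothetical 133.25 MHz sclk_fimd:

	/* htotal 1344 (1024 active), vtotal 629 (600 active), 60 Hz target:
	 *   pixelclock = 1344 * 629 * 60                   = 50722560 Hz
	 *   clkdiv     = DIV_ROUND_UP(133250000, 50722560) = 3
	 *   pixelclock = 133250000 / 3                    ~= 44.4 MHz (actual)
	 */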
static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -876,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
return 0;
}
+static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
+{
+ struct videomode *vm;
+ int ret;
+
+ vm = &ctx->panel.vm;
+ ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ return ret;
+ }
+
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_VSYNC;
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_HSYNC;
+ if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_VDEN;
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+ return 0;
+}
+
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct exynos_drm_subdrv *subdrv;
- struct exynos_drm_fimd_pdata *pdata;
- struct exynos_drm_panel_info *panel;
struct resource *res;
int win;
int ret = -EINVAL;
- if (dev->of_node) {
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- DRM_ERROR("memory allocation for pdata failed\n");
- return -ENOMEM;
- }
-
- ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
- OF_USE_NATIVE_MODE);
- if (ret) {
- DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
- return ret;
- }
- } else {
- pdata = dev->platform_data;
- if (!pdata) {
- DRM_ERROR("no platform data specified\n");
- return -EINVAL;
- }
- }
-
- panel = &pdata->panel;
- if (!panel) {
- dev_err(dev, "panel is null.\n");
- return -EINVAL;
- }
+ if (!dev->of_node)
+ return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = devm_clk_get(dev, "fimd");
- if (IS_ERR(ctx->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(ctx->bus_clk);
- }
+ ret = fimd_get_platform_data(ctx, dev);
+ if (ret)
+ return ret;
- ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
- if (IS_ERR(ctx->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- return PTR_ERR(ctx->lcd_clk);
- }
+ ret = fimd_configure_clocks(ctx, dev);
+ if (ret)
+ return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,10 +954,6 @@ static int fimd_probe(struct platform_device *pdev)
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
- ctx->vidcon0 = pdata->vidcon0;
- ctx->vidcon1 = pdata->vidcon1;
- ctx->default_win = pdata->default_win;
- ctx->panel = panel;
DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
@@ -973,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
- panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
-
- DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
- panel->timing.pixclock, ctx->clkdiv);
-
for (win = 0; win < WINDOWS_NR; win++)
fimd_clear_win(ctx, win);
@@ -1067,20 +1059,6 @@ static int fimd_runtime_resume(struct device *dev)
}
#endif
-static struct platform_device_id fimd_driver_ids[] = {
- {
- .name = "s3c64xx-fb",
- .driver_data = (unsigned long)&s3c64xx_fimd_driver_data,
- }, {
- .name = "exynos4-fb",
- .driver_data = (unsigned long)&exynos4_fimd_driver_data,
- }, {
- .name = "exynos5-fb",
- .driver_data = (unsigned long)&exynos5_fimd_driver_data,
- },
- {},
-};
-
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1089,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
- .id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
- .of_match_table = of_match_ptr(fimd_driver_dt_match),
+ .of_match_table = fimd_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index eddea494148..3271fd4b172 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -23,6 +23,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
@@ -446,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
}
g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
- if (!g2d_userptr) {
- DRM_ERROR("failed to allocate g2d_userptr.\n");
+ if (!g2d_userptr)
return ERR_PTR(-ENOMEM);
- }
atomic_set(&g2d_userptr->refcount, 1);
@@ -499,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err_free_userptr;
}
@@ -808,17 +806,8 @@ static void g2d_dma_start(struct g2d_data *g2d,
int ret;
ret = pm_runtime_get_sync(g2d->dev);
- if (ret < 0) {
- dev_warn(g2d->dev, "failed pm power on.\n");
- return;
- }
-
- ret = clk_prepare_enable(g2d->gate_clk);
- if (ret < 0) {
- dev_warn(g2d->dev, "failed to enable clock.\n");
- pm_runtime_put_sync(g2d->dev);
+ if (ret < 0)
return;
- }
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -871,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
- clk_disable_unprepare(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
@@ -1096,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
- dev_err(dev, "failed to allocate event\n");
-
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1327,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct exynos_drm_g2d_private *g2d_priv;
g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv) {
- dev_err(dev, "failed to allocate g2d private data\n");
+ if (!g2d_priv)
return -ENOMEM;
- }
g2d_priv->dev = dev;
file_priv->g2d_priv = g2d_priv;
@@ -1386,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev)
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
- if (!g2d) {
- dev_err(dev, "failed to allocate driver data\n");
+ if (!g2d)
return -ENOMEM;
- }
g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1524,14 +1506,38 @@ static int g2d_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int g2d_runtime_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(g2d->gate_clk);
+
+ return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0)
+ dev_warn(dev, "failed to enable clock.\n");
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops g2d_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+};
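With clock gating moved into the runtime-PM callbacks, the flow that
g2d_dma_start() and g2d_runqueue_worker() now rely on looks roughly like the
sketch below, assuming CONFIG_PM_RUNTIME; the annotations are illustrative,
and the resume/suspend callbacks may be invoked via a PM domain:

	/*   g2d_dma_start()
	 *     pm_runtime_get_sync(dev)     -> powers the device up, invoking
	 *       g2d_runtime_resume()       -> clk_prepare_enable(gate_clk)
	 *     ... program G2D_DMA_SFR_BASE_ADDR / G2D_DMA_COMMAND ...
	 *
	 *   g2d_runqueue_worker()
	 *     pm_runtime_put_sync(dev)     -> once idle, invokes
	 *       g2d_runtime_suspend()      -> clk_disable_unprepare(gate_clk)
	 */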
-#ifdef CONFIG_OF
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{},
};
-#endif
struct platform_driver g2d_driver = {
.probe = g2d_probe,
@@ -1540,6 +1546,6 @@ struct platform_driver g2d_driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
- .of_match_table = of_match_ptr(exynos_g2d_match),
+ .of_match_table = exynos_g2d_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c336..49f9cd23275 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
@@ -17,6 +18,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
@@ -135,7 +137,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
obj = &exynos_gem_obj->base;
buf = exynos_gem_obj->buffer;
- DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+ DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
/*
* do not release memory region from exporter.
@@ -152,8 +154,7 @@ out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
@@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
int ret;
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
- if (!exynos_gem_obj) {
- DRM_ERROR("failed to allocate exynos gem object\n");
+ if (!exynos_gem_obj)
return NULL;
- }
exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
EXYNOS_BO_WC, args->size);
+ /*
+ * If physically contiguous memory allocation fails and IOMMU is
+ * supported, try to get the buffer from a non-contiguous memory area.
+ */
+ if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ exynos_gem_obj = exynos_drm_gem_create(dev,
+ EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+ args->size);
+ }
+
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -703,13 +714,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -719,26 +728,6 @@ unlock:
return ret;
}
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle)
-{
- int ret;
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee45..09555afdfe9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
-/*
- * destroy memory region allocated.
- * - a gem handle and physical memory region pointed by a gem object
- * would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90b8a1a5344..cd6aebd53bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -20,6 +20,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_gsc.h"
@@ -1337,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8d3bc01d683..8548b974bd5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
struct drm_hdmi_context *ctx;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+ if (!ctx)
return -ENOMEM;
- }
subdrv = &ctx->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 3799d5c2b5d..fb8db037827 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
+ if (!dev->dma_parms)
+ goto error;
+
dma_set_max_seg_size(dev, 0xffffffffu);
dev->archdata.mapping = mapping;
return 0;
+error:
+ arm_iommu_release_mapping(mapping);
+ return -ENOMEM;
}
/*
@@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
GFP_KERNEL);
+ if (!subdrv_dev->dma_parms)
+ return -ENOMEM;
+
dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d2b6ab4def9..824e0705c8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
struct drm_exynos_ipp_cmd_work *cmd_work;
cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
- if (!cmd_work) {
- DRM_ERROR("failed to alloc cmd_work.\n");
+ if (!cmd_work)
return ERR_PTR(-ENOMEM);
- }
INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
@@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
struct drm_exynos_ipp_event_work *event_work;
event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
- if (!event_work) {
- DRM_ERROR("failed to alloc event_work.\n");
+ if (!event_work)
return ERR_PTR(-ENOMEM);
- }
INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
@@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
/* allocate command node */
c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
- if (!c_node) {
- DRM_ERROR("failed to allocate map node.\n");
+ if (!c_node)
return -ENOMEM;
- }
/* create property id */
ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node
mutex_lock(&c_node->mem_lock);
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
- if (!m_node) {
- DRM_ERROR("failed to allocate queue node.\n");
+ if (!m_node)
goto err_unlock;
- }
/* clear base address for error handling */
memset(&buf_info, 0x0, sizeof(buf_info));
@@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
e = kzalloc(sizeof(*e), GFP_KERNEL);
-
if (!e) {
- DRM_ERROR("failed to allocate event.\n");
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct exynos_drm_ipp_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate priv.\n");
+ if (!priv)
return -ENOMEM;
- }
priv->dev = dev;
file_priv->ipp_priv = priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 6ee55e68e0a..fcb0652e77d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -16,6 +16,7 @@
#include "exynos_drm_encoder.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
@@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
int err;
exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
- if (!exynos_plane) {
- DRM_ERROR("failed to allocate plane\n");
+ if (!exynos_plane)
return NULL;
- }
err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 49669aa24c4..7b901688def 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -21,6 +21,7 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
/*
@@ -471,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -631,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
return 0;
}
+static struct rot_limit_table rot_limit_tbl_4210 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_64K,
+ .max_h = SZ_64K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_16K,
+ .max_h = SZ_16K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_4x12 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_5250 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 1,
+ },
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+ {
+ .compatible = "samsung,exynos4210-rotator",
+ .data = &rot_limit_tbl_4210,
+ },
+ {
+ .compatible = "samsung,exynos4212-rotator",
+ .data = &rot_limit_tbl_4x12,
+ },
+ {
+ .compatible = "samsung,exynos5250-rotator",
+ .data = &rot_limit_tbl_5250,
+ },
+ {},
+};
+
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot;
struct exynos_drm_ippdrv *ippdrv;
+ const struct of_device_id *match;
int ret;
+ if (!dev->of_node) {
+ dev_err(dev, "cannot find of_node.\n");
+ return -ENODEV;
+ }
+
rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
- if (!rot) {
- dev_err(dev, "failed to allocate rot\n");
+ if (!rot)
return -ENOMEM;
- }
- rot->limit_tbl = (struct rot_limit_table *)
- platform_get_device_id(pdev)->driver_data;
+ match = of_match_node(exynos_rotator_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "failed to match node\n");
+ return -ENODEV;
+ }
+ rot->limit_tbl = (struct rot_limit_table *)match->data;
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -717,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-static struct rot_limit_table rot_limit_tbl = {
- .ycbcr420_2p = {
- .min_w = 32,
- .min_h = 32,
- .max_w = SZ_32K,
- .max_h = SZ_32K,
- .align = 3,
- },
- .rgb888 = {
- .min_w = 8,
- .min_h = 8,
- .max_w = SZ_8K,
- .max_h = SZ_8K,
- .align = 2,
- },
-};
-
-static struct platform_device_id rotator_driver_ids[] = {
- {
- .name = "exynos-rot",
- .driver_data = (unsigned long)&rot_limit_tbl,
- },
- {},
-};
-
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
@@ -803,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = {
struct platform_driver rotator_driver = {
.probe = rotator_probe,
.remove = rotator_remove,
- .id_table = rotator_driver_ids,
.driver = {
.name = "exynos-rot",
.owner = THIS_MODULE,
.pm = &rotator_pm_ops,
+ .of_match_table = exynos_rotator_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index c57c56519ad..4400330e444 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -23,6 +23,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_vidi.h"
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2f5c6942c96..a0e10aeb0e6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -32,6 +32,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <drm/exynos_drm.h>
@@ -1824,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
- if (!res->regul_bulk) {
- DRM_ERROR("failed to get memory for regulators\n");
+ if (!res->regul_bulk)
goto fail;
- }
for (i = 0; i < ARRAY_SIZE(supply); ++i) {
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
@@ -1859,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
hdmi_hdmiphy = hdmiphy;
}
-#ifdef CONFIG_OF
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
(struct device *dev)
{
@@ -1868,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
u32 value;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- DRM_ERROR("memory allocation for pdata failed\n");
+ if (!pd)
goto err_data;
- }
if (!of_find_property(np, "hpd-gpio", &value)) {
DRM_ERROR("no hpd gpio property found\n");
@@ -1885,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
err_data:
return NULL;
}
-#else
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
- (struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static struct platform_device_id hdmi_driver_types[] = {
- {
- .name = "s5pv210-hdmi",
- .driver_data = HDMI_TYPE13,
- }, {
- .name = "exynos4-hdmi",
- .driver_data = HDMI_TYPE13,
- }, {
- .name = "exynos4-hdmi14",
- .driver_data = HDMI_TYPE14,
- }, {
- .name = "exynos5-hdmi",
- .driver_data = HDMI_TYPE14,
- }, {
- /* end node */
- }
-};
-#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -1923,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
-#endif
static int hdmi_probe(struct platform_device *pdev)
{
@@ -1932,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev)
struct hdmi_context *hdata;
struct s5p_hdmi_platform_data *pdata;
struct resource *res;
+ const struct of_device_id *match;
int ret;
- if (dev->of_node) {
- pdata = drm_hdmi_dt_parse_pdata(dev);
- if (IS_ERR(pdata)) {
- DRM_ERROR("failed to parse dt\n");
- return PTR_ERR(pdata);
- }
- } else {
- pdata = dev->platform_data;
- }
+ if (!dev->of_node)
+ return -ENODEV;
- if (!pdata) {
- DRM_ERROR("no platform data specified\n");
+ pdata = drm_hdmi_dt_parse_pdata(dev);
+ if (!pdata)
return -EINVAL;
- }
- drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
- GFP_KERNEL);
- if (!drm_hdmi_ctx) {
- DRM_ERROR("failed to allocate common hdmi context.\n");
+ drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx)
return -ENOMEM;
- }
- hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
- GFP_KERNEL);
- if (!hdata) {
- DRM_ERROR("out of memory\n");
+ hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata)
return -ENOMEM;
- }
mutex_init(&hdata->hdmi_mutex);
@@ -1970,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm_hdmi_ctx);
- if (dev->of_node) {
- const struct of_device_id *match;
- match = of_match_node(of_match_ptr(hdmi_match_types),
- dev->of_node);
- if (match == NULL)
- return -ENODEV;
- hdata->type = (enum hdmi_type)match->data;
- } else {
- hdata->type = (enum hdmi_type)platform_get_device_id
- (pdev)->driver_data;
- }
+ match = of_match_node(hdmi_match_types, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ hdata->type = (enum hdmi_type)match->data;
hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
-
if (ret) {
DRM_ERROR("hdmi_resources_init failed\n");
return -EINVAL;
@@ -2141,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = hdmi_remove,
- .id_table = hdmi_driver_types,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = of_match_ptr(hdmi_match_types),
+ .of_match_table = hdmi_match_types,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6e320ae9afe..59abb1494ce 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
@@ -39,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id hdmiphy_id[] = {
- { "s5p_hdmiphy", 0 },
- { "exynos5-hdmiphy", 0 },
- { },
-};
-
-#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -57,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
-#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(hdmiphy_match_types),
+ .of_match_table = hdmiphy_match_types,
},
- .id_table = hdmiphy_id,
.probe = hdmiphy_probe,
.remove = hdmiphy_remove,
.command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c9a137caea4..63bc5f92fbb 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -30,6 +30,7 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <drm/exynos_drm.h>
@@ -1185,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev)
drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
GFP_KERNEL);
- if (!drm_hdmi_ctx) {
- DRM_ERROR("failed to allocate common hdmi context.\n");
+ if (!drm_hdmi_ctx)
return -ENOMEM;
- }
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
+ if (!ctx)
return -ENOMEM;
- }
mutex_init(&ctx->mixer_mutex);
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 7a2d40a5c1e..e9064dd9045 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -15,6 +15,7 @@ gma500_gfx-y += \
mmu.o \
power.o \
psb_drv.o \
+ gma_display.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991..162f686c532 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
+ .clock_funcs = &cdv_clock_funcs,
.output_init = cdv_output_init,
.hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
.restore_regs = cdv_restore_display_registers,
.power_down = cdv_power_down,
.power_up = cdv_power_up,
+ .update_wm = cdv_update_wm,
+ .disable_sr = cdv_disable_sr,
};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 9561e17621b..705c11d47d4 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -17,6 +17,7 @@
extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern const struct gma_clock_funcs cdv_clock_funcs;
extern void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
int reg);
extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-
-static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50Hz. */
- /* FIXME: msleep ?? */
- mdelay(20);
-}
-
-
+extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
+extern void cdv_disable_sr(struct drm_device *dev);
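
The helper deleted here busy-waited with mdelay(20) — one full frame at 50 Hz — and its own FIXME questioned that. For illustration, the sleeping variant the FIXME hints at would look like the sketch below; whether the shared gma_wait_for_vblank() replacement actually sleeps is not visible in this hunk:

#include <linux/delay.h>

static inline void example_wait_for_vblank(struct drm_device *dev)
{
        /* One refresh cycle at 50 Hz; msleep() yields the CPU where
         * mdelay() would spin for the whole 20 ms. */
        msleep(20);
}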
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7b8386fc302..661af492173 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *psb_intel_crtc =
- to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int dpll_md_reg;
u32 adpa, dpll_md;
u32 adpa_reg;
- if (psb_intel_crtc->pipe == 0)
+ if (gma_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (psb_intel_crtc->pipe == 0)
+ if (gma_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector,
+ &gma_encoder->ddc_bus->adapter);
}
static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
.dpms = cdv_intel_crt_dpms,
.mode_fixup = cdv_intel_crt_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
- .commit = psb_intel_encoder_commit,
+ .prepare = gma_encoder_prepare,
+ .commit = gma_encoder_commit,
.mode_set = cdv_intel_crt_mode_set,
};
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
cdv_intel_crt_connector_helper_funcs = {
.mode_valid = cdv_intel_crt_mode_valid,
.get_modes = cdv_intel_crt_get_modes,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_connector *psb_intel_connector;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_connector *gma_connector;
+ struct gma_encoder *gma_encoder;
struct drm_connector *connector;
struct drm_encoder *encoder;
u32 i2c_reg;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
+ connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(dev, connector,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- encoder = &psb_intel_encoder->base;
+ encoder = &gma_encoder->base;
drm_encoder_init(dev, encoder,
&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
/* Set up the DDC bus. */
i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}*/
- psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
- if (!psb_intel_encoder->ddc_bus) {
+ if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed_ddc;
}
- psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ gma_encoder->type = INTEL_OUTPUT_ANALOG;
/*
psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
return;
failed_ddc:
- drm_encoder_cleanup(&psb_intel_encoder->base);
- drm_connector_cleanup(&psb_intel_connector->base);
- kfree(psb_intel_connector);
+ drm_encoder_cleanup(&gma_encoder->base);
+ drm_connector_cleanup(&gma_connector->base);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
return;
}
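
cdv_intel_crt_init() above is a textbook instance of the kernel's goto-unwind idiom: each failure label releases exactly what was set up before the failing step, in reverse order, so every partially-built state has a single exit path. Reduced to its skeleton (the function name is illustrative; the struct types are the gma500 ones used above):

#include <linux/slab.h>

static void example_output_init(struct drm_device *dev)
{
        struct gma_encoder *gma_encoder;
        struct gma_connector *gma_connector;

        gma_encoder = kzalloc(sizeof(*gma_encoder), GFP_KERNEL);
        if (!gma_encoder)
                return;

        gma_connector = kzalloc(sizeof(*gma_connector), GFP_KERNEL);
        if (!gma_connector)
                goto failed_connector;

        /* ... register encoder + connector, create the DDC bus; a
         * failure past this point jumps to a label that also cleans
         * up the DRM objects, as failed_ddc does above ... */
        return;

failed_connector:
        kfree(gma_encoder);
}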
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 82430ad8ba6..8fbfa06da62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -19,54 +19,20 @@
*/
#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
#include "cdv_device.h"
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
-struct cdv_intel_range_t {
- int min, max;
-};
-
-struct cdv_intel_p2_t {
- int dot_limit;
- int p2_slow, p2_fast;
-};
-
-struct cdv_intel_clock_t {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-};
-
-#define INTEL_P2_NUM 2
-
-struct cdv_intel_limit_t {
- struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
- struct cdv_intel_p2_t p2;
- bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
- int, int, struct cdv_intel_clock_t *);
-};
-
-static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
- struct drm_crtc *crtc, int target, int refclk,
- struct cdv_intel_clock_t *best_clock);
-static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock);
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
@@ -75,7 +41,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
#define CDV_LIMIT_DP_27 4
#define CDV_LIMIT_DP_100 5
-static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+static const struct gma_limit_t cdv_intel_limits[] = {
{ /* CDV_SINGLE_LVDS_96MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
@@ -85,9 +51,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.m2 = {.min = 58, .max = 158},
.p = {.min = 28, .max = 140},
.p1 = {.min = 2, .max = 10},
- .p2 = {.dot_limit = 200000,
- .p2_slow = 14, .p2_fast = 14},
- .find_pll = cdv_intel_find_best_PLL,
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_SINGLE_LVDS_100MHz */
.dot = {.min = 20000, .max = 115500},
@@ -102,7 +67,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
* is 80-224MHz. Prefer single channel as much as possible.
*/
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_27MHz */
.dot = {.min = 20000, .max = 400000},
@@ -114,7 +79,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_96MHz */
.dot = {.min = 20000, .max = 400000},
@@ -126,7 +91,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DP_27MHz */
.dot = {.min = 160000, .max = 272000},
@@ -255,10 +220,10 @@ void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
+ struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
{
- struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -405,31 +370,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
return 0;
}
-/*
- * Returns whether any encoder on the specified pipe is of the specified type
- */
-static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(l_entry);
- if (psb_intel_encoder->type == type)
- return true;
- }
- }
- return false;
-}
-
-static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
- int refclk)
+static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
{
- const struct cdv_intel_limit_t *limit;
- if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ const struct gma_limit_t *limit;
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* Only single-channel LVDS is currently supported on CDV. If that
* turns out to be wrong, add the dual-channel LVDS limits here.
@@ -438,8 +383,8 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
else
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
- } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
else
@@ -454,8 +399,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
}
/* m1 is reserved as 0 in CDV, n is a ring counter */
-static void cdv_intel_clock(struct drm_device *dev,
- int refclk, struct cdv_intel_clock_t *clock)
+static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -463,93 +407,12 @@ static void cdv_intel_clock(struct drm_device *dev,
clock->dot = clock->vco / clock->p;
}
-
-#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
-static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
- const struct cdv_intel_limit_t *limit,
- struct cdv_intel_clock_t *clock)
-{
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
- /* unnecessary to check the range of m(m1/M2)/n again */
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
- /* XXX: We may need to be checking "Dot clock"
- * depending on the multiplier, connector, etc.,
- * rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
-
- return true;
-}
-
-static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
- struct drm_crtc *crtc, int target, int refclk,
- struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk,
+ struct gma_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
- struct cdv_intel_clock_t clock;
- int err = target;
-
-
- if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
- /*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
- */
- if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- clock.m1 = 0;
- /* m1 is reserved as 0 in CDV, n is a ring counter.
- So skip the m1 loop */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
- clock.m2++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- cdv_intel_clock(dev, refclk, &clock);
-
- if (!cdv_intel_PLL_is_valid(crtc,
- limit, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
-
- return err != target;
-}
-
-static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock)
-{
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
if (refclk == 27000) {
if (target < 200000) {
clock.p1 = 2;
@@ -584,85 +447,10 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
clock.p = clock.p1 * clock.p2;
clock.vco = (refclk * clock.m) / clock.n;
clock.dot = clock.vco / clock.p;
- memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+ memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
return true;
}
-static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret = 0;
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* no fb bound */
- if (!crtc->fb) {
- dev_err(dev->dev, "No FB bound\n");
- goto psb_intel_pipe_cleaner;
- }
-
-
- /* We are displaying this buffer, make sure it is actually loaded
- into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
- if (ret < 0)
- goto psb_intel_pipe_set_base_exit;
- start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
-
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
-
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- dev_err(dev->dev, "Unknown color depth\n");
- ret = -EINVAL;
- goto psb_intel_pipe_set_base_exit;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- dev_dbg(dev->dev,
- "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
-
- REG_WRITE(map->base, offset);
- REG_READ(map->base);
- REG_WRITE(map->surf, start);
- REG_READ(map->surf);
-
-psb_intel_pipe_cleaner:
- /* If there was a previous display we can now unpin it */
- if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
-
-psb_intel_pipe_set_base_exit:
- gma_power_end(dev);
- return ret;
-}
-
#define FIFO_PIPEA (1 << 0)
#define FIFO_PIPEB (1 << 1)
@@ -670,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = NULL;
+ struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- psb_intel_crtc = to_psb_intel_crtc(crtc);
+ gma_crtc = to_gma_crtc(crtc);
- if (crtc->fb == NULL || !psb_intel_crtc->active)
+ if (crtc->fb == NULL || !gma_crtc->active)
return false;
return true;
}
@@ -701,29 +489,29 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev)
static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- if (psb_intel_crtc->pipe != 1)
+ if (gma_crtc->pipe != 1)
return false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+ if (gma_encoder->type == INTEL_OUTPUT_LVDS)
return true;
}
return false;
}
-static void cdv_intel_disable_self_refresh (struct drm_device *dev)
+void cdv_disable_sr(struct drm_device *dev)
{
if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -731,7 +519,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
REG_READ(FW_BLC_SELF);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* Cedarview workaround: write the overlay plane, which forces the
* hardware to leave MAX_FIFO state.
@@ -739,13 +527,14 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
REG_READ(OV_OVADD);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
}
}
-static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc)
+void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
if (cdv_intel_single_pipe_active(dev)) {
u32 fw;
@@ -780,12 +569,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
REG_WRITE(DSPFW6, 0x10);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* enable self-refresh for single pipe active */
REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
REG_READ(FW_BLC_SELF);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
} else {
@@ -797,216 +586,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
REG_WRITE(DSPFW5, 0x01010101);
REG_WRITE(DSPFW6, 0x1d0);
- cdv_intel_wait_for_vblank(dev);
-
- cdv_intel_disable_self_refresh(dev);
-
- }
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- break;
- case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.pipe[0].palette[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- cdv_intel_disable_self_refresh(dev);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- if (psb_intel_crtc->active)
- break;
-
- psb_intel_crtc->active = true;
-
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
-
- /* Jim Bish - switch plane and pipe per Scott */
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- udelay(150);
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-
- temp = REG_READ(map->status);
- temp &= ~(0xFFFF);
- temp |= PIPE_FIFO_UNDERRUN;
- REG_WRITE(map->status, temp);
- REG_READ(map->status);
-
- cdv_intel_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
- break;
- case DRM_MODE_DPMS_OFF:
- if (!psb_intel_crtc->active)
- break;
-
- psb_intel_crtc->active = false;
-
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Jim Bish - changed pipe/plane here as well. */
-
- drm_vblank_off(dev, pipe);
- /* Wait for vblank for the disable to take effect */
- cdv_intel_wait_for_vblank(dev);
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
- REG_READ(map->conf);
- }
-
- /* Wait for vblank for the disable to take effect. */
- cdv_intel_wait_for_vblank(dev);
-
- udelay(150);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- }
+ gma_wait_for_vblank(dev);
- /* Wait for the clocks to turn off. */
- udelay(150);
- break;
+ dev_priv->ops->disable_sr(dev);
}
- cdv_intel_update_watermark(dev, crtc);
- /*Set FIFO Watermarks*/
- REG_WRITE(DSPARB, 0x3F3E);
-}
-
-static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
}
-
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
@@ -1031,31 +616,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
bool is_hdmi = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- const struct cdv_intel_limit_t *limit;
+ const struct gma_limit_t *limit;
u32 ddi_select = 0;
bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- ddi_select = psb_intel_encoder->ddi_select;
- switch (psb_intel_encoder->type) {
+ ddi_select = gma_encoder->ddi_select;
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -1108,12 +693,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(adjusted_mode);
- limit = cdv_intel_limit(crtc, refclk);
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
- dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
return 0;
}
@@ -1264,7 +850,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
@@ -1275,344 +861,16 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
- cdv_intel_wait_for_vblank(dev);
-
- return 0;
-}
-
-
-/**
- * Save HW state of the given crtc
- */
-static void cdv_intel_crtc_save(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_dbg(dev->dev, "No CRTC state found\n");
- return;
- }
-
- crtc_state->saveDSPCNTR = REG_READ(map->cntr);
- crtc_state->savePIPECONF = REG_READ(map->conf);
- crtc_state->savePIPESRC = REG_READ(map->src);
- crtc_state->saveFP0 = REG_READ(map->fp0);
- crtc_state->saveFP1 = REG_READ(map->fp1);
- crtc_state->saveDPLL = REG_READ(map->dpll);
- crtc_state->saveHTOTAL = REG_READ(map->htotal);
- crtc_state->saveHBLANK = REG_READ(map->hblank);
- crtc_state->saveHSYNC = REG_READ(map->hsync);
- crtc_state->saveVTOTAL = REG_READ(map->vtotal);
- crtc_state->saveVBLANK = REG_READ(map->vblank);
- crtc_state->saveVSYNC = REG_READ(map->vsync);
- crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
-
- /*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(map->size);
- crtc_state->saveDSPPOS = REG_READ(map->pos);
-
- crtc_state->saveDSPBASE = REG_READ(map->base);
-
- DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
-}
-
-/**
- * Restore HW state of the given crtc
- */
-static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_dbg(dev->dev, "No crtc state\n");
- return;
- }
-
- DRM_DEBUG(
- "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(map->cntr),
- REG_READ(map->conf),
- REG_READ(map->src),
- REG_READ(map->fp0),
- REG_READ(map->fp1),
- REG_READ(map->dpll),
- REG_READ(map->htotal),
- REG_READ(map->hblank),
- REG_READ(map->hsync),
- REG_READ(map->vtotal),
- REG_READ(map->vblank),
- REG_READ(map->vsync),
- REG_READ(map->stride),
- REG_READ(map->size),
- REG_READ(map->pos),
- REG_READ(map->base)
- );
-
- DRM_DEBUG(
- "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
-
- if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(map->dpll,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- DRM_DEBUG("write dpll: %x\n",
- REG_READ(map->dpll));
- udelay(150);
- }
-
- REG_WRITE(map->fp0, crtc_state->saveFP0);
- REG_READ(map->fp0);
-
- REG_WRITE(map->fp1, crtc_state->saveFP1);
- REG_READ(map->fp1);
-
- REG_WRITE(map->dpll, crtc_state->saveDPLL);
- REG_READ(map->dpll);
- udelay(150);
-
- REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
- REG_WRITE(map->hblank, crtc_state->saveHBLANK);
- REG_WRITE(map->hsync, crtc_state->saveHSYNC);
- REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
- REG_WRITE(map->vblank, crtc_state->saveVBLANK);
- REG_WRITE(map->vsync, crtc_state->saveVSYNC);
- REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
-
- REG_WRITE(map->size, crtc_state->saveDSPSIZE);
- REG_WRITE(map->pos, crtc_state->saveDSPPOS);
-
- REG_WRITE(map->src, crtc_state->savePIPESRC);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
- REG_WRITE(map->conf, crtc_state->savePIPECONF);
-
- cdv_intel_wait_for_vblank(dev);
-
- REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
-
- cdv_intel_wait_for_vblank(dev);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
-}
-
-static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp;
- size_t addr = 0;
- struct gtt_range *gt;
- struct drm_gem_object *obj;
- int ret = 0;
-
- /* if we want to turn off the cursor, ignore width and height */
- if (!handle) {
- /* turn off the cursor */
- temp = CURSOR_MODE_DISABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, 0);
- gma_power_end(dev);
- }
-
- /* unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- return 0;
- }
-
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- if (obj->size < width * height * 4) {
- dev_dbg(dev->dev, "buffer is to small\n");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- gt = container_of(obj, struct gtt_range, gem);
-
- /* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
- if (ret) {
- dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- goto unref_cursor;
- }
-
- addr = gt->offset; /* Or resource.start ??? */
-
- psb_intel_crtc->cursor_addr = addr;
-
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
-
- /* unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- }
-
- psb_intel_crtc->cursor_obj = obj;
- return ret;
-
-unref_cursor:
- drm_gem_object_unreference(obj);
- return ret;
-}
-
-static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
-
-
- if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
- x = -x;
- }
- if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
- y = -y;
- }
-
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+ gma_wait_for_vblank(dev);
- adder = psb_intel_crtc->cursor_addr;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
- gma_power_end(dev);
- }
return 0;
}
-static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int i;
- int end = (start + size > 256) ? 256 : start + size;
-
- for (i = start; i < end; i++) {
- psb_intel_crtc->lut_r[i] = red[i] >> 8;
- psb_intel_crtc->lut_g[i] = green[i] >> 8;
- psb_intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- cdv_intel_crtc_load_lut(crtc);
-}
-
-static int cdv_crtc_set_config(struct drm_mode_set *set)
-{
- int ret = 0;
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set);
-
- pm_runtime_forbid(&dev->pdev->dev);
-
- ret = drm_crtc_helper_set_config(set);
-
- pm_runtime_allow(&dev->pdev->dev);
-
- return ret;
-}
-
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* FIXME: why are we using this, should it be cdv_ in this tree ? */
-static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+static void i8xx_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
@@ -1625,12 +883,12 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
bool is_lvds;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
@@ -1703,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -1747,44 +1005,28 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-
- kfree(psb_intel_crtc->crtc_state);
- drm_crtc_cleanup(crtc);
- kfree(psb_intel_crtc);
-}
-
-static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-
- if (crtc->fb) {
- gt = to_psb_fb(crtc->fb)->gtt;
- psb_gtt_unpin(gt);
- }
-}
-
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
- .dpms = cdv_intel_crtc_dpms,
- .mode_fixup = cdv_intel_crtc_mode_fixup,
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = cdv_intel_crtc_mode_set,
- .mode_set_base = cdv_intel_pipe_set_base,
- .prepare = cdv_intel_crtc_prepare,
- .commit = cdv_intel_crtc_commit,
- .disable = cdv_intel_crtc_disable,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
- .save = cdv_intel_crtc_save,
- .restore = cdv_intel_crtc_restore,
- .cursor_set = cdv_intel_crtc_cursor_set,
- .cursor_move = cdv_intel_crtc_cursor_move,
- .gamma_set = cdv_intel_crtc_gamma_set,
- .set_config = cdv_crtc_set_config,
- .destroy = cdv_intel_crtc_destroy,
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs cdv_clock_funcs = {
+ .clock = cdv_intel_clock,
+ .limit = cdv_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
};
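
The new cdv_clock_funcs table is the seam that lets shared gma_display code drive chip-specific PLL selection: cdv_intel_crtc_mode_set() now fetches its limits through gma_crtc->clock_funcs->limit(crtc, refclk) and then calls limit->find_pll(), so Cedarview supplies only the pieces that genuinely differ (DP uses its fixed-divisor cdv_intel_find_dp_pll, everything else the shared gma_find_best_pll). Inferred from the initializer and call sites in this hunk — the authoritative definition lives in gma_display.h — the table's shape is roughly:

struct gma_clock_funcs {
        /* Derive dot/vco/m/p from the raw divisor fields. */
        void (*clock)(int refclk, struct gma_clock_t *clock);
        /* Pick the limit table matching refclk and the active output type. */
        const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
        /* Range-check a candidate clock against a limit table. */
        bool (*pll_is_valid)(struct drm_crtc *crtc,
                             const struct gma_limit_t *limit,
                             struct gma_clock_t *clock);
};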
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 88d9ef6b5b4..f4eb43573ca 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
+#include "gma_display.h"
#include <drm/drm_dp_helper.h>
#define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct psb_intel_encoder *encoder;
+ struct gma_encoder *encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
uint8_t train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
* Return true if a CPU or PCH DP output is attached to an eDP panel,
* false otherwise.
*/
-static bool is_edp(struct psb_intel_encoder *encoder)
+static bool is_edp(struct gma_encoder *encoder)
{
return encoder->type == INTEL_OUTPUT_EDP;
}
-static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
static int
-cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
}
static int
-cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 19) / 20;
}
-static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
msleep(intel_dp->panel_power_up_delay);
}
-static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
}
/* Returns true if the panel was already on when called */
-static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
return false;
}
-static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp, idle_off_mask = PP_ON;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
DRM_DEBUG_KMS("Over\n");
}
-static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
gma_backlight_enable(dev);
}
-static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
}
static int
-cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
/* Write data to the aux channel in native mode */
static int
-cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
/* Write a single byte to the aux channel in native mode */
static int
-cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
uint16_t address, uint8_t byte)
{
return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
/* read bytes from a native aux channel */
static int
-cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
struct cdv_intel_dp *intel_dp = container_of(adapter,
struct cdv_intel_dp,
adapter);
- struct psb_intel_encoder *encoder = intel_dp->encoder;
+ struct gma_encoder *encoder = intel_dp->encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+cdv_intel_dp_i2c_init(struct gma_connector *connector,
+ struct gma_encoder *encoder, const char *name)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
struct drm_display_mode *adjusted_mode)
{
struct drm_psb_private *dev_priv = encoder->dev->dev_private;
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int lane_count = 4, bpp = 24;
struct cdv_intel_dp_m_n m_n;
- int pipe = intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct psb_intel_encoder *intel_encoder;
+ struct gma_encoder *intel_encoder;
struct cdv_intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
- intel_encoder = to_psb_intel_encoder(encoder);
+ intel_encoder = to_gma_encoder(encoder);
intel_dp = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
- if (intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
else
pfit_control = 0;
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
REG_WRITE(PFIT_CONTROL, pfit_control);
}
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
/* If the sink supports it, try to set the power state appropriately */
-static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
static void cdv_intel_dp_commit(struct drm_encoder *encoder)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
static void
cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
* cases where the sink may still be asleep.
*/
static bool
-cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
uint8_t *recv, int recv_bytes)
{
int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
* link status information
*/
static bool
-cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
*/
static void
-cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
}
static bool
-cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
static bool
-cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+cdv_intel_dplink_set_level(struct gma_encoder *encoder,
uint8_t dp_train_pat)
{
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
}
static void
-cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
/* Enable corresponding port and start training pattern 1 */
static void
-cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
/* Enable output, wait for it to become active */
REG_WRITE(intel_dp->output_reg, reg);
REG_READ(intel_dp->output_reg);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
DRM_DEBUG_KMS("Link config\n");
/* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
}
static void
-cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
}
static void
-cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+cdv_intel_dp_link_down(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
REG_READ(intel_dp->output_reg);
}
-static enum drm_connector_status
-cdv_dp_detect(struct psb_intel_encoder *encoder)
+static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
static enum drm_connector_status
cdv_intel_dp_detect(struct drm_connector *connector, bool force)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
static int cdv_intel_dp_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct edid *edid = NULL;
int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
static bool
cdv_intel_dp_detect_audio(struct drm_connector *connector)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
struct edid *edid;
bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_psb_private *dev_priv = connector->dev->dev_private;
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -1700,11 +1701,10 @@ done:
static void
cdv_intel_dp_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
- if (is_edp(psb_intel_encoder)) {
+ if (is_edp(gma_encoder)) {
/* cdv_intel_panel_destroy_backlight(connector->dev); */
if (intel_dp->panel_fixed_mode) {
kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
.get_modes = cdv_intel_dp_get_modes,
.mode_valid = cdv_intel_dp_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
void
cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct cdv_intel_dp *intel_dp;
const char *name = NULL;
int type = DRM_MODE_CONNECTOR_DisplayPort;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto err_connector;
intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
if (type == DRM_MODE_CONNECTOR_DisplayPort)
- psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
else
- psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+ gma_encoder->type = INTEL_OUTPUT_EDP;
- psb_intel_encoder->dev_priv=intel_dp;
- intel_dp->encoder = psb_intel_encoder;
+ gma_encoder->dev_priv = intel_dp;
+ intel_dp->encoder = gma_encoder;
intel_dp->output_reg = output_reg;
drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
switch (output_reg) {
case DP_B:
name = "DPDDC-B";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+ gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
break;
case DP_C:
name = "DPDDC-C";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+ gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
break;
}
cdv_disable_intel_clock_gating(dev);
- cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+ cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
/* FIXME: fail check */
cdv_intel_dp_add_properties(connector);
- if (is_edp(psb_intel_encoder)) {
+ if (is_edp(gma_encoder)) {
int ret;
struct edp_power_seq cur;
u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
- ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+ cdv_intel_edp_panel_vdd_on(gma_encoder);
+ ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
- cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+ cdv_intel_edp_panel_vdd_off(gma_encoder);
if (ret == 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
return;
err_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
err_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
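The DP conversion above is representative of the whole series: every psb_intel_encoder/psb_intel_connector use becomes gma_encoder/gma_connector, and connector-to-encoder lookups go through the shared gma_attached_encoder() helper. As a rough sketch of that accessor pattern (the real definitions live in the gma500 headers; the struct layout shown here is an assumption inferred from the fields used in this patch), the helpers are thin container_of() wrappers:

/* Hedged sketch, not the verbatim header: only the members this
 * patch touches (base, type, dev_priv, encoder) are shown. */
struct gma_encoder {
	struct drm_encoder base;
	int type;		/* INTEL_OUTPUT_* */
	void *dev_priv;		/* per-output state (DP/HDMI/LVDS priv) */
};

struct gma_connector {
	struct drm_connector base;
	struct gma_encoder *encoder;
};

#define to_gma_encoder(e) container_of(e, struct gma_encoder, base)
#define to_gma_connector(c) container_of(c, struct gma_connector, base)

static inline struct gma_encoder *
gma_attached_encoder(struct drm_connector *connector)
{
	return to_gma_connector(connector)->encoder;
}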
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 464153d9d2d..1c0d723b8d2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
u32 hdmib;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
hdmib = (2 << 10);
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
hdmib |= HDMIB_PIPE_B_SELECT;
if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
u32 hdmib;
hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
static void cdv_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
}
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
static void cdv_hdmi_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
static enum drm_connector_status cdv_hdmi_detect(
struct drm_connector *connector, bool force)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
hdmi_priv->has_hdmi_sink = false;
hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
bool centre;
uint64_t curValue;
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
*/
static int cdv_hdmi_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct edid *edid = NULL;
int ret = 0;
- edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
.dpms = cdv_hdmi_dpms,
.mode_fixup = cdv_hdmi_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = cdv_hdmi_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_helper_funcs
cdv_hdmi_connector_helper_funcs = {
.get_modes = cdv_hdmi_get_modes,
.mode_valid = cdv_hdmi_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
void cdv_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int reg)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct mid_intel_hdmi_priv *hdmi_priv;
int ddc_bus;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
- GFP_KERNEL);
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ gma_connector = kzalloc(sizeof(struct gma_connector),
GFP_KERNEL);
- if (!psb_intel_connector)
+ if (!gma_connector)
goto err_connector;
hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
if (!hdmi_priv)
goto err_priv;
- connector = &psb_intel_connector->base;
+ connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- encoder = &psb_intel_encoder->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
hdmi_priv->hdmi_reg = reg;
hdmi_priv->has_hdmi_sink = false;
- psb_intel_encoder->dev_priv = hdmi_priv;
+ gma_encoder->dev_priv = hdmi_priv;
drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
switch (reg) {
case SDVOB:
ddc_bus = GPIOE;
- psb_intel_encoder->ddi_select = DDI0_SELECT;
+ gma_encoder->ddi_select = DDI0_SELECT;
break;
case SDVOC:
ddc_bus = GPIOD;
- psb_intel_encoder->ddi_select = DDI1_SELECT;
+ gma_encoder->ddi_select = DDI1_SELECT;
break;
default:
DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
break;
}
- psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
- if (!psb_intel_encoder->i2c_bus) {
+ if (!gma_encoder->i2c_bus) {
dev_err(dev->dev, "No ddc adapter available!\n");
goto failed_ddc;
}
- hdmi_priv->hdmi_i2c_adapter =
- &(psb_intel_encoder->i2c_bus->adapter);
+ hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
drm_sysfs_connector_add(connector);
return;
@@ -379,7 +370,7 @@ failed_ddc:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
err_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
err_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f..20e08e65d46 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
- encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
u32 pfit_control;
/*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
- pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
- ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+ ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
if (ret)
return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
*/
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
uint64_t curValue;
if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
cdv_intel_lvds_connector_helper_funcs = {
.get_modes = cdv_intel_lvds_get_modes,
.mode_valid = cdv_intel_lvds_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
void cdv_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct cdv_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
return;
}
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ gma_encoder = kzalloc(sizeof(struct gma_encoder),
GFP_KERNEL);
- if (!psb_intel_encoder)
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ gma_connector = kzalloc(sizeof(struct gma_connector),
GFP_KERNEL);
- if (!psb_intel_connector)
+ if (!gma_connector)
goto failed_connector;
lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv)
goto failed_lvds_priv;
- psb_intel_encoder->dev_priv = lvds_priv;
+ gma_encoder->dev_priv = lvds_priv;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
* FIXME: destroy i2c_bus on exit
*/
- psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
GPIOB,
"LVDSBLC_B");
- if (!psb_intel_encoder->i2c_bus) {
+ if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
&dev->pdev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
- psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
- dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+ gma_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
/*
* LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
*/
/* Set up the DDC bus. */
- psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
GPIOC,
"LVDSDDC_C");
- if (!psb_intel_encoder->ddc_bus) {
+ if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* preferred mode is the right one.
*/
psb_intel_ddc_get_modes(connector,
- &psb_intel_encoder->ddc_bus->adapter);
+ &gma_encoder->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
failed_find:
printk(KERN_ERR "Failed find\n");
- if (psb_intel_encoder->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
failed_ddc:
printk(KERN_ERR "Failed DDC\n");
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
failed_blc_i2c:
printk(KERN_ERR "Failed BLC\n");
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
kfree(lvds_priv);
failed_lvds_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923ab..01dd7d22576 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
if (backing) {
- if (drm_gem_private_object_init(dev,
- &backing->gem, aligned_size) == 0)
- return backing;
- psb_gtt_free_range(dev, backing);
+ drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+ return backing;
}
return NULL;
}
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
+ gma_crtc->lut_r[regno] = red >> 8;
+ gma_crtc->lut_g[regno] = green >> 8;
+ gma_crtc->lut_b[regno] = blue >> 8;
}
static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, int regno)
{
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
+ *red = gma_crtc->lut_r[regno] << 8;
+ *green = gma_crtc->lut_g[regno] << 8;
+ *blue = gma_crtc->lut_b[regno] << 8;
}
static int psbfb_probe(struct drm_fb_helper *helper,
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct drm_encoder *encoder = &psb_intel_encoder->base;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct drm_encoder *encoder = &gma_encoder->base;
int crtc_mask = 0, clone_mask = 0;
/* valid crtcs */
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_ANALOG:
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
- psb_intel_connector_clones(dev, clone_mask);
+ gma_connector_clones(dev, clone_mask);
}
}
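psbfb_gamma_set()/psbfb_gamma_get() above keep only the high byte of each 16-bit DRM gamma value; gma_crtc_load_lut() (added later in this patch) packs the three 8-bit components into one 32-bit palette register per entry. A sketch of that packing, ignoring the per-entry lut_adj offset the loader also applies:

/* One palette entry as written by gma_crtc_load_lut(): 0x00RRGGBB */
static u32 demo_palette_entry(u8 r, u8 g, u8 b)
{
	return ((u32)r << 16) | ((u32)g << 8) | b;
}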
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 989558a9e6e..395f20b07aa 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -41,7 +41,7 @@ struct psb_fbdev {
#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
-extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+extern int gma_connector_clones(struct drm_device *dev, int type_mask);
#endif
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80..10ae8c52d06 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
/* Remove the list map if one is present */
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
/* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* What validation is needed here ? */
/* Make it mmapable */
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
- /* GEM should really work out the hash offsets for us */
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
drm_gem_object_unreference(obj);
unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * psb_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via psb_gem_dumb_create, at least
- * we hope it was created that way. i915 seems to assume the caller
- * does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* psb_gem_fault - pagefault handler for GEM objects
* @vma: the VMA of the GEM object
* @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
if (gtt == NULL)
return -ENOMEM;
- if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
- goto free_gtt;
+
+ drm_gem_private_object_init(dev, &gtt->gem, size);
if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
return 0;
-free_gtt:
+
+ drm_gem_object_release(&gtt->gem);
psb_gtt_free_range(dev, gtt);
return -ENOMEM;
}
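Two core API changes drive the gem.c hunks: drm_gem_private_object_init() no longer returns an error, and the old map_list hash is replaced by the per-object vma_node managed through <drm/drm_vma_manager.h>. Since drm_gem_create_mmap_offset() is now safe to call repeatedly, the "if (!obj->map_list.map)" guard disappears. A minimal sketch of the resulting mmap-offset pattern (the helper name is made up for illustration):

#include <drm/drm_vma_manager.h>

/* Sketch only: mirrors the psb_gem_dumb_map_gtt() change above. */
static int demo_gem_map_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	ret = drm_gem_create_mmap_offset(obj);	/* idempotent now */
	if (ret)
		return ret;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}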
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 00000000000..24e8af3d22b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include <drm/drmP.h>
+#include "gma_display.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(l_entry);
+ if (gma_encoder->type == type)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void gma_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ unsigned long start, offset;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto gma_pipe_cleaner;
+ }
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto gma_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(map->cntr);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto gma_pipe_set_base_exit;
+ }
+ REG_WRITE(map->cntr, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ /* FIXME: Investigate whether this really is the base for psb and why
+ the linear offset is named base for the other chips. map->surf
+ should be the base and map->linoff the offset for all chips */
+ if (IS_PSB(dev)) {
+ REG_WRITE(map->base, offset + start);
+ REG_READ(map->base);
+ } else {
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
+ }
+
+gma_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+gma_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/* Loads the palette/gamma unit for the CRTC with the prepared values */
+void gma_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ int palreg = map->palette;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
+ dev_priv->regs.pipe[0].palette[i] =
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 start, u32 size)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
+ for (i = start; i < end; i++) {
+ gma_crtc->lut_r[i] = red[i] >> 8;
+ gma_crtc->lut_g[i] = green[i] >> 8;
+ gma_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ gma_crtc_load_lut(crtc);
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 temp;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+
+ if (IS_CDV(dev))
+ dev_priv->ops->disable_sr(dev);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ if (gma_crtc->active)
+ break;
+
+ gma_crtc->active = true;
+
+ /* Enable the DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
+
+ gma_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ if (!gma_crtc->active)
+ break;
+
+ gma_crtc->active = false;
+
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Turn off vblank interrupts */
+ drm_vblank_off(dev, pipe);
+
+ /* Wait for vblank for the disable to take effect */
+ gma_wait_for_vblank(dev);
+
+ /* Disable plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
+ }
+
+ /* Disable pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ gma_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ if (IS_CDV(dev))
+ dev_priv->ops->update_wm(dev, crtc);
+
+ /* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
+ struct drm_gem_object *obj;
+ void *tmp_dst, *tmp_src;
+ int ret = 0, i, cursor_pages;
+
+ /* If we didn't get a handle then turn the cursor off */
+ if (!handle) {
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old GEM object */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ gma_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "Buffer is too small\n");
+ ret = -ENOMEM;
+ goto unref_cursor;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ goto unref_cursor;
+ }
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ if (cursor_gt == NULL) {
+ dev_err(dev->dev, "No hardware cursor mem available");
+ ret = -ENOMEM;
+ goto unref_cursor;
+ }
+
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
+
+ addr = gma_crtc->cursor_addr;
+ } else {
+ addr = gt->offset;
+ gma_crtc->cursor_addr = addr;
+ }
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old bo */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ }
+
+ gma_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
+}
+
+int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = gma_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void gma_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+void gma_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void gma_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
+void gma_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ kfree(gma_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(gma_crtc);
+}
+
+int gma_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+ ret = drm_crtc_helper_set_config(set);
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/**
+ * Save HW states of given crtc
+ */
+void gma_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+ /* NOTE: DSPSIZE DSPPOS only for psb */
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+ crtc_state->saveDSPBASE = REG_READ(map->base);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
+}
+
+/**
+ * Restore HW states of given crtc
+ */
+void gma_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ udelay(150);
+ }
+
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
+
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
+
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
+ udelay(150);
+
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+ gma_wait_for_vblank(dev);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
+}
+
+void gma_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void gma_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit see psb_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void gma_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+/* Currently there is only a 1:1 mapping of encoders and connectors */
+struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ return &gma_encoder->base;
+}
+
+void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
+
+bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ GMA_PLL_INVALID("p1 out of range");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ GMA_PLL_INVALID("p out of range");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ GMA_PLL_INVALID("m2 out of range");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ GMA_PLL_INVALID("m1 out of range");
+ /* On CDV m1 is always 0 */
+ if (clock->m1 <= clock->m2 && clock->m1 != 0)
+ GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ GMA_PLL_INVALID("m out of range");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ GMA_PLL_INVALID("n out of range");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ GMA_PLL_INVALID("vco out of range");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ GMA_PLL_INVALID("dot out of range");
+
+ return true;
+}
+
+bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct gma_clock_funcs *clock_funcs =
+ to_gma_crtc(crtc)->clock_funcs;
+ struct gma_clock_t clock;
+ int err = target;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* m1 is always 0 on CDV, so the outermost loop will run just once */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ (clock.m2 < clock.m1 || clock.m1 == 0) &&
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ clock_funcs->clock(refclk, &clock);
+
+ if (!clock_funcs->pll_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
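gma_find_best_pll() is a brute-force search: it walks every legal (m1, m2, n, p1) combination, derives the resulting clock through the chip-specific gma_clock_funcs, filters candidates with pll_is_valid(), and keeps the one whose dot clock is closest to the target; a return of false means no candidate improved on the initial error. A hedged sketch of how a chip backend would plug in (the limit numbers below are placeholders, not real hardware values):

static const struct gma_limit_t demo_limit = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	/* n, m, m1, m2, p, p1 ranges elided */
	.p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 },
	.find_pll = gma_find_best_pll,
};

static bool demo_pick_clock(struct drm_crtc *crtc, int target, int refclk,
			    struct gma_clock_t *best)
{
	/* true on success; false if no candidate beat the initial error */
	return demo_limit.find_pll(&demo_limit, crtc, target, refclk, best);
}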
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 00000000000..78b9f986a6e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef _GMA_DISPLAY_H_
+#define _GMA_DISPLAY_H_
+
+#include <linux/pm_runtime.h>
+
+struct gma_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+struct gma_range_t {
+ int min, max;
+};
+
+struct gma_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct gma_limit_t {
+ struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct gma_p2_t p2;
+ bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
+ int target, int refclk,
+ struct gma_clock_t *best_clock);
+};
+
+struct gma_clock_funcs {
+ void (*clock)(int refclk, struct gma_clock_t *clock);
+ const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
+ bool (*pll_is_valid)(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+};
+
+/* Common pipe related functions */
+extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
+extern void gma_wait_for_vblank(struct drm_device *dev);
+extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height);
+extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+extern void gma_crtc_load_lut(struct drm_crtc *crtc);
+extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 start, u32 size);
+extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
+extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void gma_crtc_prepare(struct drm_crtc *crtc);
+extern void gma_crtc_commit(struct drm_crtc *crtc);
+extern void gma_crtc_disable(struct drm_crtc *crtc);
+extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_set_config(struct drm_mode_set *set);
+
+extern void gma_crtc_save(struct drm_crtc *crtc);
+extern void gma_crtc_restore(struct drm_crtc *crtc);
+
+extern void gma_encoder_prepare(struct drm_encoder *encoder);
+extern void gma_encoder_commit(struct drm_encoder *encoder);
+extern void gma_encoder_destroy(struct drm_encoder *encoder);
+
+/* Common clock related functions */
+extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
+extern void gma_clock(int refclk, struct gma_clock_t *clock);
+extern bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+extern bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock);
+#endif
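The split between "given" and "derived" members of struct gma_clock_t is what keeps gma_find_best_pll() chip-agnostic: the search fills in n, m1, m2, p1 and p2, then calls the backend's clock() hook to compute m, p, vco and dot. A sketch of such a hook, assuming the classic i9xx-style derivation (the actual coefficients are per-chip and may differ):

/* Assumed derivation for illustration; see the per-chip display
 * code for the real formulas. */
static void demo_clock(int refclk, struct gma_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

static const struct gma_clock_funcs demo_clock_funcs = {
	.clock = demo_clock,
	.limit = gma_limit,
	.pll_is_valid = gma_pll_is_valid,
};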
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 1f82183536a..92babac362e 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
*/
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
- struct inode *inode;
- struct address_space *mapping;
- int i;
- struct page *p;
- int pages = gt->gem.size / PAGE_SIZE;
+ struct page **pages;
WARN_ON(gt->pages);
- /* This is the shared memory object that backs the GEM resource */
- inode = file_inode(gt->gem.filp);
- mapping = inode->i_mapping;
+ pages = drm_gem_get_pages(&gt->gem, 0);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
- if (gt->pages == NULL)
- return -ENOMEM;
- gt->npage = pages;
+ gt->pages = pages;
- for (i = 0; i < pages; i++) {
- p = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(p))
- goto err;
- gt->pages[i] = p;
- }
return 0;
-
-err:
- while (i--)
- page_cache_release(gt->pages[i]);
- kfree(gt->pages);
- gt->pages = NULL;
- return PTR_ERR(p);
}
/**
@@ -240,13 +220,7 @@ err:
*/
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
- int i;
- for (i = 0; i < gt->npage; i++) {
- /* FIXME: do we need to force dirty */
- set_page_dirty(gt->pages[i]);
- page_cache_release(gt->pages[i]);
- }
- kfree(gt->pages);
+ drm_gem_put_pages(&gt->gem, gt->pages, true, false);
gt->pages = NULL;
}
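The gtt.c conversion replaces the driver's hand-rolled shmem walk with the generic GEM page helpers: drm_gem_get_pages() pins and returns the full page array backing the object (handling cleanup on partial failure internally), and drm_gem_put_pages() releases it, marking pages dirty and/or accessed as requested. The pairing, as a sketch with illustrative function names:

static int demo_attach(struct gtt_range *gt)
{
	struct page **pages = drm_gem_get_pages(&gt->gem, 0);

	if (IS_ERR(pages))
		return PTR_ERR(pages);
	gt->pages = pages;
	return 0;
}

static void demo_detach(struct gtt_range *gt)
{
	/* dirty = true, accessed = false, as in psb_gtt_detach_pages() */
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}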
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 3abf8315f57..860a4ee9baa 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
bool centerechange;
uint64_t val;
- if (!psb_crtc)
+ if (!gma_crtc)
goto set_prop_error;
switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
(value == DRM_MODE_SCALE_NO_SCALE);
- if (psb_crtc->saved_mode.hdisplay != 0 &&
- psb_crtc->saved_mode.vdisplay != 0) {
+ if (gma_crtc->saved_mode.hdisplay != 0 &&
+ gma_crtc->saved_mode.vdisplay != 0) {
if (centerechange) {
if (!drm_crtc_helper_set_mode(encoder->crtc,
- &psb_crtc->saved_mode,
+ &gma_crtc->saved_mode,
encoder->crtc->x,
encoder->crtc->y,
encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
struct drm_encoder_helper_funcs *funcs =
encoder->helper_private;
funcs->mode_set(encoder,
- &psb_crtc->saved_mode,
- &psb_crtc->saved_adjusted_mode);
+ &gma_crtc->saved_mode,
+ &gma_crtc->saved_adjusted_mode);
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb0744841..45d5af0546b 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -227,7 +227,7 @@ enum {
#define DSI_DPI_DISABLE_BTA BIT(3)
struct mdfld_dsi_connector {
- struct psb_intel_connector base;
+ struct gma_connector base;
int pipe;
void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
};
struct mdfld_dsi_encoder {
- struct psb_intel_encoder base;
+ struct gma_encoder base;
void *private;
};
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
struct drm_connector *connector)
{
- struct psb_intel_connector *psb_connector;
+ struct gma_connector *gma_connector;
- psb_connector = to_psb_intel_connector(connector);
+ gma_connector = to_gma_connector(connector);
- return container_of(psb_connector, struct mdfld_dsi_connector, base);
+ return container_of(gma_connector, struct mdfld_dsi_connector, base);
}
static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
struct drm_encoder *encoder)
{
- struct psb_intel_encoder *psb_encoder;
+ struct gma_encoder *gma_encoder;
- psb_encoder = to_psb_intel_encoder(encoder);
+ gma_encoder = to_gma_encoder(encoder);
- return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
+ return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
}
static inline struct mdfld_dsi_config *
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc4394..321c00a944e 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -23,7 +23,7 @@
#include <drm/drmP.h>
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "framebuffer.h"
#include "mdfld_output.h"
#include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
}
/* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
return;
/* Wait for the pipe disable to take effect. */
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
}
/* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
return;
/* Wait for the pipe enable to take effect. */
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
}
}
-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
- || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+ || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
(dev_priv->core_freq == 100 ||
dev_priv->core_freq == 200))
limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
- } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0;
bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_encoder *psb_intel_encoder = NULL;
+ struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_encoder *encoder;
struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- memcpy(&psb_intel_crtc->saved_mode, mode,
+ memcpy(&gma_crtc->saved_mode, mode,
sizeof(struct drm_display_mode));
- memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+ memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
sizeof(struct drm_display_mode));
list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_MIPI:
is_mipi = true;
break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
- if (psb_intel_encoder)
+ if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the pipe enable to take effect. */
REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
.dpms = mdfld_crtc_dpms,
- .mode_fixup = psb_intel_crtc_mode_fixup,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = mdfld_crtc_mode_set,
.mode_set_base = mdfld__intel_pipe_set_base,
- .prepare = psb_intel_crtc_prepare,
- .commit = psb_intel_crtc_commit,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
};
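
Note how mdfld_helper_funcs now points at the shared gma_crtc_prepare()/gma_crtc_commit() instead of the per-chip stubs deleted above. Judging by the removed bodies, the consolidated helpers can only amount to the same two-line DPMS forwarders; a sketch under that assumption:

/* Sketch of the consolidated prepare/commit, assuming they match
 * the removed per-chip copies: both just route through the
 * helper-private dpms hook. */
static void gma_crtc_prepare_sketch(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void gma_crtc_commit_sketch(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}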
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c..54c98962b73 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -23,7 +23,7 @@
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(map->base, REG_READ(map->base));
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
bool is_lvds = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_encoder *psb_intel_encoder = NULL;
+ struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- memcpy(&psb_intel_crtc->saved_mode,
+ memcpy(&gma_crtc->saved_mode,
mode,
sizeof(struct drm_display_mode));
- memcpy(&psb_intel_crtc->saved_adjusted_mode,
+ memcpy(&gma_crtc->saved_adjusted_mode,
adjusted_mode,
sizeof(struct drm_display_mode));
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
- if (psb_intel_encoder)
+ if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
return 0;
}
-static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -563,24 +556,12 @@ pipe_set_base_exit:
return ret;
}
-static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void oaktrail_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.dpms = oaktrail_crtc_dpms,
- .mode_fixup = oaktrail_crtc_mode_fixup,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = oaktrail_crtc_mode_set,
.mode_set_base = oaktrail_pipe_set_base,
- .prepare = oaktrail_crtc_prepare,
- .commit = oaktrail_crtc_commit,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
};
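
gma_pipe_has_type() likewise replaces identical per-chip predicates. The body removed from psb_intel_display.c later in this diff shows what the shared version has to do: scan the connector list and report whether any connector driving this CRTC carries an encoder of the requested type. Reduced to its essentials:

/* Sketch of the pipe-type predicate, following the removed
 * psb_intel_pipe_has_type(): any connector whose encoder is
 * attached to this CRTC counts. */
static bool pipe_has_type_sketch(struct drm_crtc *crtc, int type)
{
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry,
			    &crtc->dev->mode_config.connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc &&
		    gma_attached_encoder(l_entry)->type == type)
			return true;
	}
	return false;
}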
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161..38153143ed8 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
-static void wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
REG_WRITE(PCH_PIPEBCONF, pipeconf);
REG_READ(PCH_PIPEBCONF);
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(dspcntr_reg, dspcntr);
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
gma_power_end(dev);
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
REG_READ(PCH_PIPEBCONF);
}
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* Enable plane */
temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
REG_READ(DSPBSURF);
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
}
/* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
.dpms = oaktrail_hdmi_dpms,
.mode_fixup = oaktrail_hdmi_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = oaktrail_hdmi_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_helper_funcs
oaktrail_hdmi_connector_helper_funcs = {
.get_modes = oaktrail_hdmi_get_modes,
.mode_valid = oaktrail_hdmi_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
&oaktrail_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
return;
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
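
oaktrail_hdmi_init() above also shows the two-stage allocation pattern that every output-init function in this series follows: allocate the encoder first, then the connector, and unwind through a goto label so the earlier allocation is freed if the later one fails. The shape, compressed (function name hypothetical):

static void output_init_sketch(struct drm_device *dev)
{
	struct gma_encoder *gma_encoder;
	struct gma_connector *gma_connector;

	gma_encoder = kzalloc(sizeof(*gma_encoder), GFP_KERNEL);
	if (!gma_encoder)
		return;				/* nothing to unwind yet */

	gma_connector = kzalloc(sizeof(*gma_connector), GFP_KERNEL);
	if (!gma_connector)
		goto failed_connector;		/* free the encoder */

	/* ... drm_encoder_init()/drm_connector_init() and helpers ... */
	return;

failed_connector:
	kfree(gma_encoder);
}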
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48..e77d7214fca 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -43,7 +43,7 @@
* Sets the power state for the panel.
*/
static void oaktrail_lvds_set_power(struct drm_device *dev,
- struct psb_intel_encoder *psb_intel_encoder,
+ struct gma_encoder *gma_encoder,
bool on)
{
u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
if (mode == DRM_MODE_DPMS_ON)
- oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
else
- oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
- oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
gma_power_end(dev);
}
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
oaktrail_lvds_get_max_backlight(dev);
- oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
}
static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
dev_priv->is_lvds_on = true;
drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
failed_find:
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (psb_intel_encoder->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
/* failed_ddc: */
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
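
The LVDS dpms hook above treats every DPMS level other than ON as a power-off, a common simplification for panels that only have on/off states. Its whole job, sketched:

/* Sketch: collapse the four DRM DPMS levels to the panel's
 * binary on/off, then delegate to the power helper. */
static void lvds_dpms_sketch(struct drm_encoder *encoder, int mode)
{
	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);

	oaktrail_lvds_set_power(encoder->dev, gma_encoder,
				mode == DRM_MODE_DPMS_ON);
}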
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index f6f534b4197..697678619bd 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -25,7 +25,7 @@
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "intel_bios.h"
-
+#include "psb_device.h"
static int psb_output_init(struct drm_device *dev)
{
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
+ .clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_device.h
index 3724b971e91..35e304c7f85 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -1,4 +1,6 @@
-/* copyright (c) 2008, Intel Corporation
+/*
+ * Copyright © 2013 Patrik Jakobsson
+ * Copyright © 2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
*/
-#ifndef _INTEL_DISPLAY_H_
-#define _INTEL_DISPLAY_H_
+#ifndef _PSB_DEVICE_H_
+#define _PSB_DEVICE_H_
-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+extern const struct gma_clock_funcs psb_clock_funcs;
#endif
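
Instead of exporting the psb_intel_pipe_has_type() predicate, psb_device.h now exports a per-chip clock-function table. The field set can be read off the psb_clock_funcs initializer later in this diff (.clock, .limit, .pll_is_valid) plus the find_pll hook embedded in each gma_limit_t; the prototypes of gma_pll_is_valid() and find_pll are inferred from their call sites here, so treat this as an approximation:

/* Approximate shape of the new per-chip clock ops. */
struct gma_clock_funcs_sketch {
	/* divisors -> m/p/vco/dot, e.g. psb_intel_clock() */
	void (*clock)(int refclk, struct gma_clock_t *clock);
	/* pick the gma_limit_t for this CRTC and refclk */
	const struct gma_limit_t *(*limit)(struct drm_crtc *crtc,
					   int refclk);
	/* range-check a candidate divisor set */
	bool (*pll_is_valid)(struct drm_crtc *crtc,
			     const struct gma_limit_t *limit,
			     struct gma_clock_t *clock);
};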
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea580744..fcb4e9ff1f2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
unsigned long irqflags;
int ret = -ENOMEM;
struct drm_connector *connector;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_encoder *gma_encoder;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
/* Only add backlight support if we have LVDS output */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
case INTEL_OUTPUT_MIPI:
ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector *connector;
- struct psb_intel_crtc *psb_intel_crtc;
+ struct gma_crtc *gma_crtc;
int i = 0;
int32_t obj_id;
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
connector = obj_to_connector(obj);
crtc = connector->encoder->crtc;
- psb_intel_crtc = to_psb_intel_crtc(crtc);
+ gma_crtc = to_gma_crtc(crtc);
for (i = 0; i < 256; i++)
- psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+ gma_crtc->lut_adj[i] = lut_arg->lut[i];
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
return 0;
}
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
.unlocked_ioctl = psb_unlocked_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
};
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
- DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+ DRIVER_MODESET | DRIVER_GEM ,
.load = psb_driver_load,
.unload = psb_driver_unload,
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = psb_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd1..4535ac7708f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -27,6 +27,7 @@
#include <drm/gma_drm.h>
#include "psb_reg.h"
#include "psb_intel_drv.h"
+#include "gma_display.h"
#include "intel_bios.h"
#include "gtt.h"
#include "power.h"
@@ -46,6 +47,7 @@ enum {
#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
/*
* Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
struct drm_crtc_funcs const *crtc_funcs;
+ const struct gma_clock_funcs *clock_funcs;
/* Setup hooks */
int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
int (*restore_regs)(struct drm_device *dev);
int (*power_up)(struct drm_device *dev);
int (*power_down)(struct drm_device *dev);
+ void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
+ void (*disable_sr)(struct drm_device *dev);
void (*lvds_bl_power)(struct drm_device *dev, bool on);
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6666493789d..97f8a03fee4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,46 +19,19 @@
*/
#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
-struct psb_intel_clock_t {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-};
-
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct psb_intel_p2_t {
- int dot_limit;
- int p2_slow, p2_fast;
-};
-
-struct psb_intel_limit_t {
- struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
- struct psb_intel_p2_t p2;
-};
-
#define INTEL_LIMIT_I9XX_SDVO_DAC 0
#define INTEL_LIMIT_I9XX_LVDS 1
-static const struct psb_intel_limit_t psb_intel_limits[] = {
+static const struct gma_limit_t psb_intel_limits[] = {
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
.dot = {.min = 20000, .max = 400000},
.vco = {.min = 1400000, .max = 2800000},
@@ -68,8 +41,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
.m2 = {.min = 3, .max = 7},
.p = {.min = 5, .max = 80},
.p1 = {.min = 1, .max = 8},
- .p2 = {.dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5},
+ .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = gma_find_best_pll,
},
{ /* INTEL_LIMIT_I9XX_LVDS */
.dot = {.min = 20000, .max = 400000},
@@ -83,23 +56,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
/* The single-channel range is 25-112MHz, and dual-channel
* is 80-224MHz. Prefer single channel as much as possible.
*/
- .p2 = {.dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7},
+ .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
+ .find_pll = gma_find_best_pll,
},
};
-static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
+ int refclk)
{
- const struct psb_intel_limit_t *limit;
+ const struct gma_limit_t *limit;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
else
limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
return limit;
}
-static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
+static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
@@ -108,353 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
}
/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(l_entry);
- if (psb_intel_encoder->type == type)
- return true;
- }
- }
- return false;
-}
-
-#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
-/**
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-
-static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
- struct psb_intel_clock_t *clock)
-{
- const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
-
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
- if (clock->m1 <= clock->m2)
- INTELPllInvalid("m1 <= m2\n");
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
- /* XXX: We may need to be checking "Dot clock"
- * depending on the multiplier, connector, etc.,
- * rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
-
- return true;
-}
-
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk,
- struct psb_intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_clock_t clock;
- const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
- int err = target;
-
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
- /*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
- */
- if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
- clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- psb_intel_clock(refclk, &clock);
-
- if (!psb_intel_PLL_is_valid
- (crtc, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return err != target;
-}
-
-void psb_intel_wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
-static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret = 0;
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* no fb bound */
- if (!crtc->fb) {
- dev_dbg(dev->dev, "No FB bound\n");
- goto psb_intel_pipe_cleaner;
- }
-
- /* We are displaying this buffer, make sure it is actually loaded
- into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
- if (ret < 0)
- goto psb_intel_pipe_set_base_exit;
- start = psbfb->gtt->offset;
-
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
-
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
-
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- dev_err(dev->dev, "Unknown color depth\n");
- ret = -EINVAL;
- psb_gtt_unpin(psbfb->gtt);
- goto psb_intel_pipe_set_base_exit;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- REG_WRITE(map->base, start + offset);
- REG_READ(map->base);
-
-psb_intel_pipe_cleaner:
- /* If there was a previous display we can now unpin it */
- if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
-
-psb_intel_pipe_set_base_exit:
- gma_power_end(dev);
- return ret;
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- psb_intel_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
- REG_READ(map->conf);
- }
-
- /* Wait for vblank for the disable to take effect. */
- psb_intel_wait_for_vblank(dev);
-
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- }
-
- /* Wait for the clocks to turn off. */
- udelay(150);
- break;
- }
-
- /*Set FIFO Watermarks*/
- REG_WRITE(DSPARB, 0x3F3E);
-}
-
-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-void psb_intel_encoder_prepare(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of prepare see psb_intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-void psb_intel_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of commit see psb_intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-void psb_intel_encoder_destroy(struct drm_encoder *encoder)
-{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-
-/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
@@ -479,17 +106,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
- struct psb_intel_clock_t clock;
+ struct gma_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const struct gma_limit_t *limit;
/* No scan out no play */
if (crtc->fb == NULL) {
@@ -498,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -520,10 +147,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
refclk = 96000;
- ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
+
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
- dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
return 0;
}
@@ -661,368 +291,29 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- psb_intel_wait_for_vblank(dev);
-
- return 0;
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- int palreg = map->palette;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- case 1:
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.pipe[0].palette[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
-
-/**
- * Save HW states of giving crtc
- */
-static void psb_intel_crtc_save(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_err(dev->dev, "No CRTC state found\n");
- return;
- }
-
- crtc_state->saveDSPCNTR = REG_READ(map->cntr);
- crtc_state->savePIPECONF = REG_READ(map->conf);
- crtc_state->savePIPESRC = REG_READ(map->src);
- crtc_state->saveFP0 = REG_READ(map->fp0);
- crtc_state->saveFP1 = REG_READ(map->fp1);
- crtc_state->saveDPLL = REG_READ(map->dpll);
- crtc_state->saveHTOTAL = REG_READ(map->htotal);
- crtc_state->saveHBLANK = REG_READ(map->hblank);
- crtc_state->saveHSYNC = REG_READ(map->hsync);
- crtc_state->saveVTOTAL = REG_READ(map->vtotal);
- crtc_state->saveVBLANK = REG_READ(map->vblank);
- crtc_state->saveVSYNC = REG_READ(map->vsync);
- crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
-
- /*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(map->size);
- crtc_state->saveDSPPOS = REG_READ(map->pos);
-
- crtc_state->saveDSPBASE = REG_READ(map->base);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
-}
-
-/**
- * Restore HW states of giving crtc
- */
-static void psb_intel_crtc_restore(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_err(dev->dev, "No crtc state\n");
- return;
- }
-
- if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(map->dpll,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- udelay(150);
- }
-
- REG_WRITE(map->fp0, crtc_state->saveFP0);
- REG_READ(map->fp0);
-
- REG_WRITE(map->fp1, crtc_state->saveFP1);
- REG_READ(map->fp1);
-
- REG_WRITE(map->dpll, crtc_state->saveDPLL);
- REG_READ(map->dpll);
- udelay(150);
-
- REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
- REG_WRITE(map->hblank, crtc_state->saveHBLANK);
- REG_WRITE(map->hsync, crtc_state->saveHSYNC);
- REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
- REG_WRITE(map->vblank, crtc_state->saveVBLANK);
- REG_WRITE(map->vsync, crtc_state->saveVSYNC);
- REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
-
- REG_WRITE(map->size, crtc_state->saveDSPSIZE);
- REG_WRITE(map->pos, crtc_state->saveDSPPOS);
-
- REG_WRITE(map->src, crtc_state->savePIPESRC);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
- REG_WRITE(map->conf, crtc_state->savePIPECONF);
-
- psb_intel_wait_for_vblank(dev);
-
- REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
-
- psb_intel_wait_for_vblank(dev);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
-}
-
-static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp;
- size_t addr = 0;
- struct gtt_range *gt;
- struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
- struct drm_gem_object *obj;
- void *tmp_dst, *tmp_src;
- int ret = 0, i, cursor_pages;
-
- /* if we want to turn of the cursor ignore width and height */
- if (!handle) {
- /* turn off the cursor */
- temp = CURSOR_MODE_DISABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, 0);
- gma_power_end(dev);
- }
-
- /* Unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- return 0;
- }
-
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- if (obj->size < width * height * 4) {
- dev_dbg(dev->dev, "buffer is to small\n");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- gt = container_of(obj, struct gtt_range, gem);
-
- /* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
- if (ret) {
- dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- goto unref_cursor;
- }
-
- if (dev_priv->ops->cursor_needs_phys) {
- if (cursor_gt == NULL) {
- dev_err(dev->dev, "No hardware cursor mem available");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- /* Prevent overflow */
- if (gt->npage > 4)
- cursor_pages = 4;
- else
- cursor_pages = gt->npage;
-
- /* Copy the cursor to cursor mem */
- tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
- for (i = 0; i < cursor_pages; i++) {
- tmp_src = kmap(gt->pages[i]);
- memcpy(tmp_dst, tmp_src, PAGE_SIZE);
- kunmap(gt->pages[i]);
- tmp_dst += PAGE_SIZE;
- }
-
- addr = psb_intel_crtc->cursor_addr;
- } else {
- addr = gt->offset; /* Or resource.start ??? */
- psb_intel_crtc->cursor_addr = addr;
- }
-
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
-
- /* unpin the old bo */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- }
-
- psb_intel_crtc->cursor_obj = obj;
- return ret;
-
-unref_cursor:
- drm_gem_object_unreference(obj);
- return ret;
-}
-
-static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t addr;
-
-
- if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
- x = -x;
- }
- if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
- y = -y;
- }
-
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
-
- addr = psb_intel_crtc->cursor_addr;
+ gma_wait_for_vblank(dev);
- if (gma_power_begin(dev, false)) {
- REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
- gma_power_end(dev);
- }
return 0;
}
-static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t type, uint32_t size)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
-
- for (i = 0; i < 256; i++) {
- psb_intel_crtc->lut_r[i] = red[i] >> 8;
- psb_intel_crtc->lut_g[i] = green[i] >> 8;
- psb_intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- psb_intel_crtc_load_lut(crtc);
-}
-
-static int psb_crtc_set_config(struct drm_mode_set *set)
-{
- int ret;
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set);
-
- pm_runtime_forbid(&dev->pdev->dev);
- ret = drm_crtc_helper_set_config(set);
- pm_runtime_allow(&dev->pdev->dev);
- return ret;
-}
-
/* Returns the clock of the currently programmed mode of the given pipe. */
static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
- struct psb_intel_clock_t clock;
+ struct gma_clock_t clock;
bool is_lvds;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
@@ -1092,8 +383,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
struct drm_display_mode *mode;
int htot;
int hsync;
@@ -1136,58 +427,30 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct gtt_range *gt;
-
- /* Unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- if (psb_intel_crtc->cursor_gt != NULL)
- psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
- kfree(psb_intel_crtc->crtc_state);
- drm_crtc_cleanup(crtc);
- kfree(psb_intel_crtc);
-}
-
-static void psb_intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-
- if (crtc->fb) {
- gt = to_psb_fb(crtc->fb)->gtt;
- psb_gtt_unpin(gt);
- }
-}
-
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
- .dpms = psb_intel_crtc_dpms,
- .mode_fixup = psb_intel_crtc_mode_fixup,
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = psb_intel_crtc_mode_set,
- .mode_set_base = psb_intel_pipe_set_base,
- .prepare = psb_intel_crtc_prepare,
- .commit = psb_intel_crtc_commit,
- .disable = psb_intel_crtc_disable,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
- .save = psb_intel_crtc_save,
- .restore = psb_intel_crtc_restore,
- .cursor_set = psb_intel_crtc_cursor_set,
- .cursor_move = psb_intel_crtc_cursor_move,
- .gamma_set = psb_intel_crtc_gamma_set,
- .set_config = psb_crtc_set_config,
- .destroy = psb_intel_crtc_destroy,
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs psb_clock_funcs = {
+ .clock = psb_intel_clock,
+ .limit = psb_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
};
/*
@@ -1195,7 +458,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
* to zero. This is a workaround for a h/w defect on Oaktrail
*/
static void psb_intel_cursor_init(struct drm_device *dev,
- struct psb_intel_crtc *psb_intel_crtc)
+ struct gma_crtc *gma_crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
@@ -1208,88 +471,87 @@ static void psb_intel_cursor_init(struct drm_device *dev,
*/
cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
if (!cursor_gt) {
- psb_intel_crtc->cursor_gt = NULL;
+ gma_crtc->cursor_gt = NULL;
goto out;
}
- psb_intel_crtc->cursor_gt = cursor_gt;
- psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+ gma_crtc->cursor_gt = cursor_gt;
+ gma_crtc->cursor_addr = dev_priv->stolen_base +
cursor_gt->offset;
} else {
- psb_intel_crtc->cursor_gt = NULL;
+ gma_crtc->cursor_gt = NULL;
}
out:
- REG_WRITE(control[psb_intel_crtc->pipe], 0);
- REG_WRITE(base[psb_intel_crtc->pipe], 0);
+ REG_WRITE(control[gma_crtc->pipe], 0);
+ REG_WRITE(base[gma_crtc->pipe], 0);
}
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc;
+ struct gma_crtc *gma_crtc;
int i;
uint16_t *r_base, *g_base, *b_base;
/* We allocate an extra array of drm_connector pointers
* for fbdev after the crtc */
- psb_intel_crtc =
- kzalloc(sizeof(struct psb_intel_crtc) +
- (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
- if (psb_intel_crtc == NULL)
+ gma_crtc = kzalloc(sizeof(struct gma_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+ if (gma_crtc == NULL)
return;
- psb_intel_crtc->crtc_state =
+ gma_crtc->crtc_state =
kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
- if (!psb_intel_crtc->crtc_state) {
+ if (!gma_crtc->crtc_state) {
dev_err(dev->dev, "Crtc state error: No memory\n");
- kfree(psb_intel_crtc);
+ kfree(gma_crtc);
return;
}
/* Set the CRTC operations from the chip specific data */
- drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+ drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
- drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
- psb_intel_crtc->pipe = pipe;
- psb_intel_crtc->plane = pipe;
+ /* Set the CRTC clock functions from chip specific data */
+ gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
- r_base = psb_intel_crtc->base.gamma_store;
+ drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
+ gma_crtc->pipe = pipe;
+ gma_crtc->plane = pipe;
+
+ r_base = gma_crtc->base.gamma_store;
g_base = r_base + 256;
b_base = g_base + 256;
for (i = 0; i < 256; i++) {
- psb_intel_crtc->lut_r[i] = i;
- psb_intel_crtc->lut_g[i] = i;
- psb_intel_crtc->lut_b[i] = i;
+ gma_crtc->lut_r[i] = i;
+ gma_crtc->lut_g[i] = i;
+ gma_crtc->lut_b[i] = i;
r_base[i] = i << 8;
g_base[i] = i << 8;
b_base[i] = i << 8;
- psb_intel_crtc->lut_adj[i] = 0;
+ gma_crtc->lut_adj[i] = 0;
}
- psb_intel_crtc->mode_dev = mode_dev;
- psb_intel_crtc->cursor_addr = 0;
+ gma_crtc->mode_dev = mode_dev;
+ gma_crtc->cursor_addr = 0;
- drm_crtc_helper_add(&psb_intel_crtc->base,
+ drm_crtc_helper_add(&gma_crtc->base,
dev_priv->ops->crtc_helper);
/* Setup the array of drm_connector pointer array */
- psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+ gma_crtc->mode_set.crtc = &gma_crtc->base;
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
- &psb_intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
- &psb_intel_crtc->base;
- psb_intel_crtc->mode_set.connectors =
- (struct drm_connector **) (psb_intel_crtc + 1);
- psb_intel_crtc->mode_set.num_connectors = 0;
- psb_intel_cursor_init(dev, psb_intel_crtc);
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
+ gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
+ gma_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, gma_crtc);
/* Set to true so that the pipe is forced off on initial config. */
- psb_intel_crtc->active = true;
+ gma_crtc->active = true;
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1298,7 +560,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
- struct psb_intel_crtc *crtc;
+ struct gma_crtc *crtc;
if (!dev_priv) {
dev_err(dev->dev, "called with no initialization\n");
@@ -1313,7 +575,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return -EINVAL;
}
- crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -1324,14 +586,14 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
struct drm_crtc *crtc = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- if (psb_intel_crtc->pipe == pipe)
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ if (gma_crtc->pipe == pipe)
break;
}
return crtc;
}
-int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+int gma_connector_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
struct drm_connector *connector;
@@ -1339,30 +601,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- if (type_mask & (1 << psb_intel_encoder->type))
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ if (type_mask & (1 << gma_encoder->type))
index_mask |= (1 << entry);
entry++;
}
return index_mask;
}
-
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
-{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
-
- return &psb_intel_encoder->base;
-}
-
-void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
- struct psb_intel_encoder *encoder)
-{
- connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
- &encoder->base);
-}
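
The rename above relies on the usual kernel pattern of embedding the DRM
base object in a driver-private structure and recovering the wrapper with
container_of(). A minimal self-contained sketch of that pattern, using the
driver's new names but stand-in types (illustrative only, not the driver's
actual definitions):

    #include <stddef.h>

    /* minimal container_of(), as found in the kernel's <linux/kernel.h> */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_crtc { int id; };        /* stand-in for the DRM core type */

    struct gma_crtc {
        struct drm_crtc base;           /* DRM core only ever sees &base */
        int pipe;
    };

    #define to_gma_crtc(x) container_of(x, struct gma_crtc, base)

    static int gma_crtc_pipe(struct drm_crtc *crtc)
    {
        return to_gma_crtc(crtc)->pipe; /* recover the wrapper */
    }
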
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 4dcae421a58..bde27fdb41b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -24,6 +24,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <linux/gpio.h>
+#include "gma_display.h"
/*
* Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
u8 slave_addr;
};
-struct psb_intel_encoder {
+struct gma_encoder {
struct drm_encoder base;
int type;
bool needs_tv_clock;
- void (*hot_plug)(struct psb_intel_encoder *);
+ void (*hot_plug)(struct gma_encoder *);
int crtc_mask;
int clone_mask;
u32 ddi_select; /* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
struct psb_intel_i2c_chan *ddc_bus;
};
-struct psb_intel_connector {
+struct gma_connector {
struct drm_connector base;
- struct psb_intel_encoder *encoder;
+ struct gma_encoder *encoder;
};
struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
uint32_t savePalette[256];
};
-struct psb_intel_crtc {
+struct gma_crtc {
struct drm_crtc base;
int pipe;
int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
+
+ const struct gma_clock_funcs *clock_funcs;
};
-#define to_psb_intel_crtc(x) \
- container_of(x, struct psb_intel_crtc, base)
-#define to_psb_intel_connector(x) \
- container_of(x, struct psb_intel_connector, base)
-#define to_psb_intel_encoder(x) \
- container_of(x, struct psb_intel_encoder, base)
+#define to_gma_crtc(x) \
+ container_of(x, struct gma_crtc, base)
+#define to_gma_connector(x) \
+ container_of(x, struct gma_connector, base)
+#define to_gma_encoder(x) \
+ container_of(x, struct gma_encoder, base)
#define to_psb_intel_framebuffer(x) \
container_of(x, struct psb_intel_framebuffer, base)
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
-extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
-extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
-extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
+extern void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder);
-static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+static inline struct gma_encoder *gma_attached_encoder(
struct drm_connector *connector)
{
- return to_psb_intel_connector(connector)->encoder;
+ return to_gma_connector(connector)->encoder;
}
-extern void psb_intel_connector_attach_encoder(
- struct psb_intel_connector *connector,
- struct psb_intel_encoder *encoder);
-
-extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
- *connector);
-
extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-extern void psb_intel_wait_for_vblank(struct drm_device *dev);
extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e619..32342f6990d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv =
(struct drm_psb_private *)dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
u32 pp_status;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = connector->dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
- if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
/* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct psb_intel_crtc *psb_intel_crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
- if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
panel_fixed_mode = mode_dev->panel_fixed_mode2;
/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
- if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+ if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
printk(KERN_ERR "Can't support LVDS on pipe A\n");
return false;
}
- if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+ if (IS_MRST(dev) && gma_crtc->pipe != 0) {
printk(KERN_ERR "Must use PIPE A\n");
return false;
}
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
int ret = 0;
if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
*/
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
if (lvds_priv->ddc_bus)
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
return -1;
if (!strcmp(property->name, "scaling mode")) {
- struct psb_intel_crtc *crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
uint64_t curval;
if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
psb_intel_lvds_connector_helper_funcs = {
.get_modes = psb_intel_lvds_get_modes,
.mode_valid = psb_intel_lvds_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
void psb_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct psb_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
u32 lvds;
int pipe;
- psb_intel_encoder =
- kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder) {
- dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder) {
+ dev_err(dev->dev, "gma_encoder allocation error\n");
return;
}
- psb_intel_connector =
- kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector) {
- dev_err(dev->dev, "psb_intel_connector allocation error\n");
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector) {
+ dev_err(dev->dev, "gma_connector allocation error\n");
goto failed_encoder;
}
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
goto failed_connector;
}
- psb_intel_encoder->dev_priv = lvds_priv;
+ gma_encoder->dev_priv = lvds_priv;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
&psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
failed_connector:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_encoder:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
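
psb_intel_lvds_init() above keeps the kernel's reverse-order goto unwind
across its two allocations. A compact sketch of that shape (hypothetical
names; only the error-handling structure mirrors the function above):

    #include <linux/slab.h>

    struct obj { int dummy; };

    static int init_pair(struct obj **enc_out, struct obj **con_out)
    {
        struct obj *enc, *con;

        enc = kzalloc(sizeof(*enc), GFP_KERNEL);
        if (!enc)
            return -ENOMEM;

        con = kzalloc(sizeof(*con), GFP_KERNEL);
        if (!con)
            goto failed_encoder;    /* free in reverse order of allocation */

        *enc_out = enc;
        *con_out = con;
        return 0;

    failed_encoder:
        kfree(enc);
        return -ENOMEM;
    }
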
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 3bc8414533c..6f01cdf5e12 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
struct psb_intel_sdvo {
- struct psb_intel_encoder base;
+ struct gma_encoder base;
struct i2c_adapter *i2c;
u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
};
struct psb_intel_sdvo_connector {
- struct psb_intel_connector base;
+ struct gma_connector base;
/* Mark the type of connector */
uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(psb_intel_attached_encoder(connector),
+ return container_of(gma_attached_encoder(connector),
struct psb_intel_sdvo, base);
}
static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
{
- return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+ return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
}
static bool
@@ -988,7 +988,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
u32 sdvox;
struct psb_intel_sdvo_in_out_map in_out;
@@ -1071,7 +1071,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
- if (psb_intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
if (psb_intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1122,7 +1122,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if ((temp & SDVO_ENABLE) == 0)
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -1837,10 +1837,8 @@ done:
static void psb_intel_sdvo_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_sdvo *sdvo =
- to_psb_intel_sdvo(&psb_intel_encoder->base);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
}
@@ -1848,8 +1846,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
static void psb_intel_sdvo_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder =
- &psb_intel_attached_encoder(connector)->base;
+ struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
struct drm_crtc *crtc = encoder->crtc;
@@ -1865,9 +1862,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
.dpms = psb_intel_sdvo_dpms,
.mode_fixup = psb_intel_sdvo_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = psb_intel_sdvo_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1883,7 +1880,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
.get_modes = psb_intel_sdvo_get_modes,
.mode_valid = psb_intel_sdvo_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1895,7 +1892,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
psb_intel_sdvo->sdvo_lvds_fixed_mode);
i2c_del_adapter(&psb_intel_sdvo->ddc);
- psb_intel_encoder_destroy(encoder);
+ gma_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2056,7 +2053,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+ gma_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
}
@@ -2076,7 +2073,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2116,7 +2113,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2155,7 +2152,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2189,7 +2186,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2541,7 +2538,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_encoder *gma_encoder;
struct psb_intel_sdvo *psb_intel_sdvo;
int i;
@@ -2558,9 +2555,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
}
/* encoder type will be decided later */
- psb_intel_encoder = &psb_intel_sdvo->base;
- psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
- drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+ gma_encoder = &psb_intel_sdvo->base;
+ gma_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -2578,7 +2575,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2621,7 +2618,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return true;
err:
- drm_encoder_cleanup(&psb_intel_encoder->base);
+ drm_encoder_cleanup(&gma_encoder->base);
i2c_del_adapter(&psb_intel_sdvo->ddc);
kfree(psb_intel_sdvo);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf..b1f8fc69023 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_edid.h>
-
+#include <drm/i2c/tda998x.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -32,6 +32,11 @@ struct tda998x_priv {
uint16_t rev;
uint8_t current_page;
int dpms;
+ bool is_hdmi_sink;
+ u8 vip_cntrl_0;
+ u8 vip_cntrl_1;
+ u8 vip_cntrl_2;
+ struct tda998x_encoder_params params;
};
#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_MM (1 << 0)
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
# define VIP_CNTRL_5_CKCASE (1 << 0)
# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
+#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
+#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
+#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
+#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
+#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
+#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_TOP_TGL (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL (1 << 1)
+# define TBG_CNTRL_0_DE_EXT (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT (1 << 3)
# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
-# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
-# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
-# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
-# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
-# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
-# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_H_TGL (1 << 0)
+# define TBG_CNTRL_1_V_TGL (1 << 1)
+# define TBG_CNTRL_1_TGL_EN (1 << 2)
+# define TBG_CNTRL_1_X_EXT (1 << 3)
+# define TBG_CNTRL_1_H_EXT (1 << 4)
+# define TBG_CNTRL_1_V_EXT (1 << 5)
# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
+# define I2S_FORMAT(x) (((x) & 3) << 0)
+#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
+# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
+# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
+# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+# define AUDIO_DIV_SERCLK_1 0
+# define AUDIO_DIV_SERCLK_2 1
+# define AUDIO_DIV_SERCLK_4 2
+# define AUDIO_DIV_SERCLK_8 3
+# define AUDIO_DIV_SERCLK_16 4
+# define AUDIO_DIV_SERCLK_32 5
#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
# define SEL_CLK_SEL_CLK1 (1 << 0)
# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
/* Page 10h: information frames and packets */
+#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
+#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
+#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
+#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
+#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
# define AIP_CNTRL_0_LAYOUT (1 << 2)
# define AIP_CNTRL_0_ACR_MAN (1 << 5)
# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
+# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT (1 << 6)
+#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
+#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
+#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
+#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
+#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
+#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
+#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
+#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
+# define CTS_N_K(x) (((x) & 7) << 0)
+# define CTS_N_M(x) (((x) & 3) << 4)
#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
# define ENC_CNTRL_RST_ENC (1 << 0)
# define ENC_CNTRL_RST_SEL (1 << 1)
# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
+# define DIP_FLAGS_ACR (1 << 0)
+# define DIP_FLAGS_GC (1 << 1)
+#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
+# define DIP_IF_FLAGS_IF1 (1 << 1)
+# define DIP_IF_FLAGS_IF2 (1 << 2)
+# define DIP_IF_FLAGS_IF3 (1 << 3)
+# define DIP_IF_FLAGS_IF4 (1 << 4)
+# define DIP_IF_FLAGS_IF5 (1 << 5)
+#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
/* Page 12h: HDCP and OTP */
#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX4 REG(0x12, 0x9b) /* read/write */
+# define TX4_PD_RAM (1 << 1)
#define REG_TX33 REG(0x12, 0xb8) /* read/write */
# define TX33_HDMI (1 << 1)
@@ -338,6 +402,23 @@ fail:
return ret;
}
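+
+/*
+ * Write cnt bytes from p to consecutive device registers starting at reg,
+ * as a single I2C transfer: the register address byte followed by the
+ * payload.
+ */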
+static void
+reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[cnt+1];
+ int ret;
+
+ buf[0] = REG2ADDR(reg);
+ memcpy(&buf[1], p, cnt);
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, cnt + 1);
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
static uint8_t
reg_read(struct drm_encoder *encoder, uint16_t reg)
{
@@ -406,13 +487,176 @@ tda998x_reset(struct drm_encoder *encoder)
reg_write(encoder, REG_SERIALIZER, 0x00);
reg_write(encoder, REG_BUFFER_OUT, 0x00);
reg_write(encoder, REG_PLL_SCG1, 0x00);
- reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
reg_write(encoder, REG_PLL_SCGN1, 0xfa);
reg_write(encoder, REG_PLL_SCGN2, 0x00);
reg_write(encoder, REG_PLL_SCGR1, 0x5b);
reg_write(encoder, REG_PLL_SCGR2, 0x00);
reg_write(encoder, REG_PLL_SCG2, 0x10);
+
+	/* Write the default value to the MUX register */
+ reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
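+/*
+ * HDMI infoframe checksum: all header and payload bytes, including the
+ * checksum byte itself, must sum to zero mod 256, so return the two's
+ * complement of the running sum.
+ */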
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+ uint8_t sum = 0;
+
+ while (bytes--)
+ sum += *buf++;
+ return (255 - sum) + 1;
+}
+
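+/* Infoframe buffer indexing: HB(x) is header byte x, PB(x) is payload
+ * byte x, which follows the three header bytes (PB(0) is the checksum). */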
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
+
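+/* Fill in the checksum, then keep the infoframe disabled while its
+ * contents are rewritten so a partially updated packet never goes out. */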
+static void
+tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+ uint8_t *buf, size_t size)
+{
+ buf[PB(0)] = tda998x_cksum(buf, size);
+
+ reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
+ reg_write_range(encoder, addr, buf, size);
+ reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[PB(5) + 1];
+
+ buf[HB(0)] = 0x84;
+ buf[HB(1)] = 0x01;
+ buf[HB(2)] = 10;
+ buf[PB(0)] = 0;
+ buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+ buf[PB(4)] = p->audio_frame[4];
+ buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+ sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ uint8_t buf[PB(13) + 1];
+
+ memset(buf, 0, sizeof(buf));
+ buf[HB(0)] = 0x82;
+ buf[HB(1)] = 0x02;
+ buf[HB(2)] = 13;
+ buf[PB(4)] = drm_match_cea_mode(mode);
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+ sizeof(buf));
+}
+
+static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+{
+ if (on) {
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ } else {
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ }
+}
+
+static void
+tda998x_configure_audio(struct drm_encoder *encoder,
+ struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+ uint32_t n;
+
+ /* Enable audio ports */
+ reg_write(encoder, REG_ENA_AP, p->audio_cfg);
+ reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+
+ /* Set audio input source */
+ switch (p->audio_format) {
+ case AFMT_SPDIF:
+ reg_write(encoder, REG_MUX_AP, 0x40);
+ clksel_aip = AIP_CLKSEL_AIP(0);
+ /* FS64SPDIF */
+ clksel_fs = AIP_CLKSEL_FS(2);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = 0;
+ break;
+
+ case AFMT_I2S:
+ reg_write(encoder, REG_MUX_AP, 0x64);
+ clksel_aip = AIP_CLKSEL_AIP(1);
+ /* ACLK */
+ clksel_fs = AIP_CLKSEL_FS(0);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = CA_I2S_CA_I2S(0);
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
+
+ /* Enable automatic CTS generation */
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
+ reg_write(encoder, REG_CTS_N, cts_n);
+
+ /*
+ * Audio input somehow depends on HDMI line rate which is
+ * related to pixclk. Testing showed that modes with pixclk
+ * >100MHz need a larger divider while <40MHz need the default.
+ * There is no detailed info in the datasheet, so we just
+	 * assume anything above 100MHz needs the larger divider.
+ */
+ if (mode->clock > 100000)
+ adiv = AUDIO_DIV_SERCLK_16;
+ else
+ adiv = AUDIO_DIV_SERCLK_8;
+ reg_write(encoder, REG_AUDIO_DIV, adiv);
+
+ /*
+	 * This is the approximate value of N, which happens to be
+	 * the recommended value for non-coherent clocks: e.g. a 48kHz
+	 * sample rate gives N = 128 * 48000 / 1000 = 6144.
+ */
+ n = 128 * p->audio_sample_rate / 1000;
+
+ /* Write the CTS and N values */
+ buf[0] = 0x44;
+ buf[1] = 0x42;
+ buf[2] = 0x01;
+ buf[3] = n;
+ buf[4] = n >> 8;
+ buf[5] = n >> 16;
+ reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+
+ /* Set CTS clock reference */
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+ /* Reset CTS generator */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+ /* Write the channel status */
+ buf[0] = 0x04;
+ buf[1] = 0x00;
+ buf[2] = 0x00;
+ buf[3] = 0xf1;
+ reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+
+ tda998x_audio_mute(encoder, true);
+ mdelay(20);
+ tda998x_audio_mute(encoder, false);
+
+ /* Write the audio information packet */
+ tda998x_write_aif(encoder, p);
}
/* DRM encoder functions */
@@ -420,6 +664,23 @@ tda998x_reset(struct drm_encoder *encoder)
static void
tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ struct tda998x_encoder_params *p = params;
+
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->params = *p;
}
static void
@@ -436,18 +697,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- /* enable audio and video ports */
- reg_write(encoder, REG_ENA_AP, 0xff);
+ /* enable video ports, audio will be enabled later */
reg_write(encoder, REG_ENA_VP_0, 0xff);
reg_write(encoder, REG_ENA_VP_1, 0xff);
reg_write(encoder, REG_ENA_VP_2, 0xff);
/* set muxing after enabling ports: */
- reg_write(encoder, REG_VIP_CNTRL_0,
- VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
- reg_write(encoder, REG_VIP_CNTRL_1,
- VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
- reg_write(encoder, REG_VIP_CNTRL_2,
- VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
/* disable audio and video ports */
@@ -494,43 +751,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- uint16_t hs_start, hs_end, line_start, line_end;
- uint16_t vwin_start, vwin_end, de_start, de_end;
- uint16_t ref_pix, ref_line, pix_start2;
+ uint16_t ref_pix, ref_line, n_pix, n_line;
+ uint16_t hs_pix_s, hs_pix_e;
+ uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+ uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+ uint16_t vwin1_line_s, vwin1_line_e;
+ uint16_t vwin2_line_s, vwin2_line_e;
+ uint16_t de_pix_s, de_pix_e;
uint8_t reg, div, rep;
- hs_start = mode->hsync_start - mode->hdisplay;
- hs_end = mode->hsync_end - mode->hdisplay;
- line_start = 1;
- line_end = 1 + mode->vsync_end - mode->vsync_start;
- vwin_start = mode->vtotal - mode->vsync_start;
- vwin_end = vwin_start + mode->vdisplay;
- de_start = mode->htotal - mode->hdisplay;
- de_end = mode->htotal;
-
- pix_start2 = 0;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- pix_start2 = (mode->htotal / 2) + hs_start;
-
- /* TODO how is this value calculated? It is 2 for all common
- * formats in the tables in out of tree nxp driver (assuming
- * I've properly deciphered their byzantine table system)
- */
+ /*
+	 * Internally the TDA998x uses ITU-R BT.656 style sync, but
+	 * we get VESA style sync. The TDA998x uses a reference pixel
+ * relative to ITU to sync to the input frame and for output
+ * sync generation. Currently, we are using reference detection
+ * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
+ * which is position of rising VS with coincident rising HS.
+ *
+	 * Now there are some issues to take care of:
+ * - HDMI data islands require sync-before-active
+ * - TDA998x register values must be > 0 to be enabled
+ * - REFLINE needs an additional offset of +1
+	 * - REFPIX needs an additional offset of +1 for UYVY and +3 for RGB
+ *
+ * So we add +1 to all horizontal and vertical register values,
+ * plus an additional +3 for REFPIX as we are using RGB input only.
*/
- ref_line = 2;
-
- /* this might changes for other color formats from the CRTC: */
- ref_pix = 3 + hs_start;
+ n_pix = mode->htotal;
+ n_line = mode->vtotal;
+
+ hs_pix_e = mode->hsync_end - mode->hdisplay;
+ hs_pix_s = mode->hsync_start - mode->hdisplay;
+ de_pix_e = mode->htotal;
+ de_pix_s = mode->htotal - mode->hdisplay;
+ ref_pix = 3 + hs_pix_s;
+
+ /*
+ * Attached LCD controllers may generate broken sync. Allow
+ * those to adjust the position of the rising VS edge by adding
+ * HSKEW to ref_pix.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+ ref_pix += adjusted_mode->hskew;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+ ref_line = 1 + mode->vsync_start - mode->vdisplay;
+ vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = mode->vsync_start - mode->vdisplay;
+ vs1_line_e = vs1_line_s +
+ mode->vsync_end - mode->vsync_start;
+ vwin2_line_s = vwin2_line_e = 0;
+ vs2_pix_s = vs2_pix_e = 0;
+ vs2_line_s = vs2_line_e = 0;
+ } else {
+ ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
+ vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
+ vs1_line_e = vs1_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+ vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+ vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
+		vs2_line_s = vs1_line_s + mode->vtotal/2;
+ vs2_line_e = vs2_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ }
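+
+	/*
+	 * Worked example (sanity check, not from the datasheet): the CEA
+	 * 1280x720@60 timing (htotal 1650, vtotal 750, hsync 1390-1430,
+	 * vsync 725-730) takes the progressive branch and yields
+	 * hs_pix_s/e = 110/150, de_pix_s/e = 370/1650, ref_pix = 113,
+	 * ref_line = 6, vs1_line_s/e = 5/10, vwin1_line_s/e = 29/749.
+	 */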
div = 148500 / mode->clock;
- DBG("clock=%d, div=%u", mode->clock, div);
- DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
- hs_start, hs_end, line_start, line_end);
- DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
- vwin_start, vwin_end, de_start, de_end);
- DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
- ref_line, ref_pix, pix_start2);
-
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -561,9 +853,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
PLL_SERIAL_2_SRL_PR(rep));
- reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
- reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
-
/* set color matrix bypass flag: */
reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
@@ -572,47 +861,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+ /*
+ * Sync on rising HSYNC/VSYNC
+ */
reg_write(encoder, REG_VIP_CNTRL_3, 0);
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+
+ /*
+	 * The TDA19988 requires high-active sync at the input stage, so
+	 * invert the low-active sync provided by the master encoder here.
+ */
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+ /*
+	 * Always generate sync polarity relative to the input sync, and
+	 * undo at the output stage any sync inversion applied at the input.
+ */
+ reg = TBG_CNTRL_1_TGL_EN;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+ reg |= TBG_CNTRL_1_H_TGL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg |= TBG_CNTRL_1_V_TGL;
+ reg_write(encoder, REG_TBG_CNTRL_1, reg);
reg_write(encoder, REG_VIDFORMAT, 0x00);
- reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
- reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
- reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
- reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
- reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
- reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
- reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
- reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
- reg_write16(encoder, REG_DE_START_MSB, de_start);
- reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+ reg_write16(encoder, REG_NPIX_MSB, n_pix);
+ reg_write16(encoder, REG_NLINE_MSB, n_line);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+ reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
+ reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
+ reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
+ reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
reg_write(encoder, REG_ENABLE_SPACE, 0x01);
}
- reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
- reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-
- reg = TBG_CNTRL_1_VHX_EXT_DE |
- TBG_CNTRL_1_VHX_EXT_HS |
- TBG_CNTRL_1_VHX_EXT_VS |
- TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
- TBG_CNTRL_1_VH_TGL_2;
- if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
- reg |= TBG_CNTRL_1_VH_TGL_0;
- reg_set(encoder, REG_TBG_CNTRL_1, reg);
-
/* must be last register set: */
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+
+ /* Only setup the info frames if the sink is HDMI */
+ if (priv->is_hdmi_sink) {
+ /* We need to turn HDMI HDCP stuff on to get audio through */
+ reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+ reg_set(encoder, REG_TX33, TX33_HDMI);
+
+ tda998x_write_avi(encoder, adjusted_mode);
+
+ if (priv->params.audio_cfg)
+ tda998x_configure_audio(encoder, adjusted_mode,
+ &priv->params);
+ }
}
static enum drm_connector_status
@@ -673,6 +990,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
static uint8_t *
do_get_edid(struct drm_encoder *encoder)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j = 0, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +998,9 @@ do_get_edid(struct drm_encoder *encoder)
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
+ if (priv->rev == TDA19988)
+ reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+
/* base block fetch */
if (read_edid_block(encoder, block, 0))
goto fail;
@@ -689,7 +1010,7 @@ do_get_edid(struct drm_encoder *encoder)
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
- return block;
+ goto done;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
@@ -716,9 +1037,15 @@ do_get_edid(struct drm_encoder *encoder)
block = new;
}
+done:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
+
return block;
fail:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
dev_warn(encoder->dev->dev, "failed to read EDID\n");
kfree(block);
return NULL;
@@ -728,12 +1055,14 @@ static int
tda998x_encoder_get_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
struct edid *edid = (struct edid *)do_get_edid(encoder);
int n = 0;
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
kfree(edid);
}
@@ -807,6 +1136,10 @@ tda998x_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+
priv->current_page = 0;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
priv->dpms = DRM_MODE_DPMS_OFF;
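
For context on the N and CTS programming above (general HDMI audio clock
regeneration behaviour, not something this commit defines): the sink
recovers the audio sample clock as

    f_s = f_TMDS * N / (128 * CTS)

so with N = 128 * f_s / 1000 as chosen in tda998x_configure_audio() and a
hardware-measured CTS, a 48kHz stream on a 74.25MHz link works out to

    N   = 128 * 48000 / 1000              = 6144
    CTS = 74250000 * 6144 / (128 * 48000) = 74250
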
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489..ab1892eb107 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 2e91fc3580b..d8180d22ced 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f..d4d16eddd65 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3..b8449a84a0d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
+ i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_sprite.o \
intel_opregion.o \
intel_sideband.o \
+ intel_uncore.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa1104..af42e94f684 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- idf |= CH7xxx_IDF_HSP;
+ idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057..55ab9246e1b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,8 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <generated/utsrelease.h>
+#include <linux/list_sort.h>
+#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static const char *cache_level_str(int type)
+static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return " snooped (LLC)";
- case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
- default: return "";
- }
+ return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+ struct i915_vma *vma;
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
+ get_global_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
- cache_level_str(obj->cache_level),
+ i915_cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
if (obj->pin_count)
seq_printf(m, " (pinned x %d)", obj->pin_count);
+ if (obj->pin_display)
+ seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!i915_is_ggtt(vma->vm))
+ seq_puts(m, " (pp");
+ else
+ seq_puts(m, " (g");
+ seq_printf(m, "gtt offset: %08lx, size: %08lx)",
+ vma->node.start, vma->node.size);
+ }
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_vma *vma;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
if (ret)
return ret;
+ /* FIXME: the user of this interface might want more than just GGTT */
switch (list) {
case ACTIVE_LIST:
- seq_printf(m, "Active:\n");
- head = &dev_priv->mm.active_list;
+ seq_puts(m, "Active:\n");
+ head = &vm->active_list;
break;
case INACTIVE_LIST:
- seq_printf(m, "Inactive:\n");
- head = &dev_priv->mm.inactive_list;
+ seq_puts(m, "Inactive:\n");
+ head = &vm->inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, head, mm_list) {
+ list_for_each_entry(vma, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj);
+ describe_obj(m, vma->obj);
seq_printf(m, "\n");
+ total_obj_size += vma->obj->base.size;
+ total_gtt_size += vma->node.size;
+ count++;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+ return 0;
+}
+
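+/* list_sort() comparator: order GEM objects by their start offset within
+ * stolen memory, ascending. */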
+static int obj_rank_by_stolen(void *priv,
+ struct list_head *A, struct list_head *B)
+{
+ struct drm_i915_gem_object *a =
+ container_of(A, struct drm_i915_gem_object, obj_exec_link);
+ struct drm_i915_gem_object *b =
+ container_of(B, struct drm_i915_gem_object, obj_exec_link);
+
+ return a->stolen->start - b->stolen->start;
+}
+
+static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ LIST_HEAD(stolen);
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
+ total_obj_size += obj->base.size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ count++;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
count++;
}
+ list_sort(NULL, &stolen, obj_rank_by_stolen);
+ seq_puts(m, "Stolen:\n");
+ while (!list_empty(&stolen)) {
+ obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ list_del_init(&obj->obj_exec_link);
+ }
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space->size; \
+ size += i915_gem_obj_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space->size; \
+ mappable_size += i915_gem_obj_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
@@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
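+
+/* Like count_objects() above, but walks a list of i915_vma nodes and
+ * accounts each VMA's backing object. */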
+#define count_vmas(list, member) do { \
+ list_for_each_entry(vma, list, member) { \
+ size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++count; \
+ if (vma->obj->map_and_fenceable) { \
+ mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
u32 count, mappable_count, purgeable_count;
size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
+ struct i915_vma *vma;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.active_list, mm_list);
+ count_vmas(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.inactive_list, mm_list);
+ count_vmas(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
- size += obj->gtt_space->size;
+ size += i915_gem_obj_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += obj->gtt_space->size;
+ mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%zu [%lu] gtt total\n",
- dev_priv->gtt.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
@@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
return 0;
}
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
- seq_printf(m, " ");
+ seq_puts(m, " ");
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
@@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->enable_stall_check)
- seq_printf(m, "Stall check enabled, ");
+ seq_puts(m, "Stall check enabled, ");
else
- seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
if (count == 0)
- seq_printf(m, "No requests\n");
+ seq_puts(m, "No requests\n");
return 0;
}
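Most of the churn in this file follows one conversion rule: seq_printf() always parses a format string, so constant output moves to the cheaper seq_file helpers. The convention, with hypothetical values:

    seq_printf(m, "%d prepares\n", pending); /* formatting needed: seq_printf */
    seq_puts(m, "No requests\n");            /* constant string:   seq_puts   */
    seq_putc(m, '\n');                       /* single character:  seq_putc   */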
@@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Fence %d, pin count = %d, object = ",
i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
- seq_printf(m, "unused");
+ seq_puts(m, "unused");
else
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex);
@@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static const char *ring_str(int ring)
-{
- switch (ring) {
- case RCS: return "render";
- case VCS: return "bsd";
- case BCS: return "blt";
- case VECS: return "vebox";
- default: return "";
- }
-}
-
-static const char *pin_flag(int pinned)
-{
- if (pinned > 0)
- return " P";
- else if (pinned < 0)
- return " p";
- else
- return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- if (e->pos + len <= e->start) {
- e->pos += len;
- return false;
- }
-
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
-
- return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
-
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
-
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
- }
-
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
- }
-
- e->bytes += len;
- e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- len = vsnprintf(NULL, 0, f, args);
- if (!__i915_error_seek(e, len))
- return;
- }
-
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
-
- __i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- len = strlen(str);
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
- memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
- va_list args;
-
- va_start(args, f);
- i915_error_vprintf(e, f, args);
- va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x %8u %02x %02x %x %x",
- err->gtt_offset,
- err->size,
- err->read_domains,
- err->write_domain,
- err->rseqno, err->wseqno);
- err_puts(m, pin_flag(err->pinned));
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->ring != -1 ? " " : "");
- err_puts(m, ring_str(err->ring));
- err_puts(m, cache_level_str(err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
-{
- BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
- err_printf(m, "%s command stream:\n", ring_str(ring));
- err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
- err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
- if (INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
- err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
- err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][0],
- error->semaphore_seqno[ring][0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][1],
- error->semaphore_seqno[ring][1]);
- }
- err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
- struct drm_device *dev;
- struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
- struct drm_i915_error_state_buf *m)
-
-{
- struct drm_device *dev = error_priv->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error = error_priv->error;
- struct intel_ring_buffer *ring;
- int i, j, page, offset, elt;
-
- if (!error) {
- err_printf(m, "no error state collected\n");
- return 0;
- }
-
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- for_each_ring(ring, dev_priv, i)
- i915_ring_error_state(m, dev, error, i);
-
- if (error->active_bo)
- print_error_buffers(m, "Active",
- error->active_bo,
- error->active_bo_count);
-
- if (error->pinned_bo)
- print_error_buffers(m, "Pinned",
- error->pinned_bo,
- error->pinned_bo_count);
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
- if ((obj = error->ring[i].batchbuffer)) {
- err_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- if (error->ring[i].num_requests) {
- err_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
- error->ring[i].num_requests);
- for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
- error->ring[i].requests[j].seqno,
- error->ring[i].requests[j].jiffies,
- error->ring[i].requests[j].tail);
- }
- }
-
- if ((obj = error->ring[i].ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n",
- offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- obj = error->ring[i].ctx;
- if (obj) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- offset += 16;
- }
- }
- }
-
- if (error->overlay)
- intel_overlay_print_error_state(m, error->overlay);
-
- if (error->display)
- intel_display_print_error_state(m, dev, error->display);
-
- return 0;
-}
-
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_error_state_file_priv *error_priv;
- unsigned long flags;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
@@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error_priv->error = dev_priv->gpu_error.first_error;
- if (error_priv->error)
- kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ i915_error_state_get(dev, error_priv);
file->private_data = error_priv;
@@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
{
struct i915_error_state_file_priv *error_priv = file->private_data;
- if (error_priv->error)
- kref_put(&error_priv->error->ref, i915_error_state_free);
+ i915_error_state_put(error_priv);
kfree(error_priv);
return 0;
@@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
struct drm_i915_error_state_buf error_str;
loff_t tmp_pos = 0;
ssize_t ret_count = 0;
- int ret = 0;
-
- memset(&error_str, 0, sizeof(error_str));
-
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size,
- GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
- if (error_str.buf == NULL) {
- error_str.size = PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL) {
- error_str.size = 128;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL)
- return -ENOMEM;
-
- error_str.start = *pos;
+ int ret;
- ret = i915_error_state(error_priv, &error_str);
+ ret = i915_error_state_buf_init(&error_str, count, *pos);
if (ret)
- goto out;
+ return ret;
- if (error_str.bytes == 0 && error_str.err) {
- ret = error_str.err;
+ ret = i915_error_state_to_str(&error_str, error_priv);
+ if (ret)
goto out;
- }
ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
error_str.buf,
@@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
else
*pos = error_str.start + ret_count;
out:
- kfree(error_str.buf);
+ i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
}
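The shrinking-fallback allocation that used to be open-coded here moves behind i915_error_state_buf_init(), as the rest of the error-state printer relocates to i915_gpu_error.c in this series. A generalized sketch of that fallback idiom, under the assumption that a smaller buffer is still useful to the caller:

    static void *alloc_with_fallback(size_t *size)
    {
    	static const size_t fallbacks[] = { PAGE_SIZE, 128 };
    	void *buf;
    	int i;

    	/* Try the ideal size first, refusing to retry or warn on failure. */
    	buf = kmalloc(*size, GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
    	for (i = 0; !buf && i < ARRAY_SIZE(fallbacks); i++) {
    		*size = fallbacks[i];
    		buf = kmalloc(*size, GFP_TEMPORARY);
    	}
    	return buf;
    }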
@@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
(freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- seq_printf(m, "no P-state info available\n");
+ seq_puts(m, "no P-state info available\n");
}
return 0;
@@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m)
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
- seq_printf(m, "Current RS state: ");
+ seq_puts(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case RSX_STATUS_RC1:
- seq_printf(m, "RC1\n");
+ seq_puts(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
- seq_printf(m, "RC1E\n");
+ seq_puts(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
- seq_printf(m, "RS1\n");
+ seq_puts(m, "RS1\n");
break;
case RSX_STATUS_RS2:
- seq_printf(m, "RS2 (RC6)\n");
+ seq_puts(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
- seq_printf(m, "RC3 (RC6+)\n");
+ seq_puts(m, "RC3 (RC6+)\n");
break;
default:
- seq_printf(m, "unknown\n");
+ seq_puts(m, "unknown\n");
break;
}
@@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
- int count=0, ret;
-
+ int count = 0, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
- seq_printf(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
+ seq_puts(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference \n");
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m)
}
gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1118,25 @@ static int gen6_drpc_info(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_printf(m, "Current RC state: ");
+ seq_puts(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_printf(m, "Core Power Down\n");
+ seq_puts(m, "Core Power Down\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case GEN6_RC3:
- seq_printf(m, "RC3\n");
+ seq_puts(m, "RC3\n");
break;
case GEN6_RC6:
- seq_printf(m, "RC6\n");
+ seq_puts(m, "RC6\n");
break;
case GEN6_RC7:
- seq_printf(m, "RC7\n");
+ seq_puts(m, "RC7\n");
break;
default:
- seq_printf(m, "Unknown\n");
+ seq_puts(m, "Unknown\n");
break;
}
@@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!I915_HAS_FBC(dev)) {
- seq_printf(m, "FBC unsupported on this chipset\n");
+ seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
if (intel_fbc_enabled(dev)) {
- seq_printf(m, "FBC enabled\n");
+ seq_puts(m, "FBC enabled\n");
} else {
- seq_printf(m, "FBC disabled: ");
- switch (dev_priv->no_fbc_reason) {
+ seq_puts(m, "FBC disabled: ");
+ switch (dev_priv->fbc.no_fbc_reason) {
+ case FBC_OK:
+ seq_puts(m, "FBC actived, but currently disabled in hardware");
+ break;
+ case FBC_UNSUPPORTED:
+ seq_puts(m, "unsupported by this chipset");
+ break;
case FBC_NO_OUTPUT:
- seq_printf(m, "no outputs");
+ seq_puts(m, "no outputs");
break;
case FBC_STOLEN_TOO_SMALL:
- seq_printf(m, "not enough stolen memory");
+ seq_puts(m, "not enough stolen memory");
break;
case FBC_UNSUPPORTED_MODE:
- seq_printf(m, "mode not supported");
+ seq_puts(m, "mode not supported");
break;
case FBC_MODE_TOO_LARGE:
- seq_printf(m, "mode too large");
+ seq_puts(m, "mode too large");
break;
case FBC_BAD_PLANE:
- seq_printf(m, "FBC unsupported on plane");
+ seq_puts(m, "FBC unsupported on plane");
break;
case FBC_NOT_TILED:
- seq_printf(m, "scanout buffer not tiled");
+ seq_puts(m, "scanout buffer not tiled");
break;
case FBC_MULTIPLE_PIPES:
- seq_printf(m, "multiple pipes are enabled");
+ seq_puts(m, "multiple pipes are enabled");
break;
case FBC_MODULE_PARAM:
- seq_printf(m, "disabled per module param (default off)");
+ seq_puts(m, "disabled per module param (default off)");
+ break;
+ case FBC_CHIP_DEFAULT:
+ seq_puts(m, "disabled per chip default");
break;
default:
- seq_printf(m, "unknown reason");
+ seq_puts(m, "unknown reason");
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
return 0;
}
@@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- seq_printf(m, "unsupported on this chipset\n");
+ seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
if (dev_priv->ips.pwrctx) {
- seq_printf(m, "power context ");
+ seq_puts(m, "power context ");
describe_obj(m, dev_priv->ips.pwrctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
if (dev_priv->ips.renderctx) {
- seq_printf(m, "render context ");
+ seq_puts(m, "render context ");
describe_obj(m, dev_priv->ips.renderctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
for_each_ring(ring, dev_priv, i) {
if (ring->default_context) {
seq_printf(m, "HW default context %s ring ", ring->name);
describe_obj(m, ring->default_context->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
@@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
seq_printf(m, "forcewake count = %u\n", forcewake_count);
@@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
static const char *swizzle_string(unsigned swizzle)
{
- switch(swizzle) {
+ switch (swizzle) {
case I915_BIT_6_SWIZZLE_NONE:
return "none";
case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- seq_printf(m, "aliasing PPGTT:\n");
+ seq_puts(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
if (!IS_VALLEYVIEW(dev)) {
- seq_printf(m, "unsupported\n");
+ seq_puts(m, "unsupported\n");
return 0;
}
@@ -1924,6 +1628,194 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_llc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+ return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 psrstat, psrperf;
+
+ if (!IS_HASWELL(dev)) {
+ seq_puts(m, "PSR not supported on this platform\n");
+ } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+ seq_puts(m, "PSR enabled\n");
+ } else {
+ seq_puts(m, "PSR disabled: ");
+ switch (dev_priv->no_psr_reason) {
+ case PSR_NO_SOURCE:
+ seq_puts(m, "not supported on this platform");
+ break;
+ case PSR_NO_SINK:
+ seq_puts(m, "not supported by panel");
+ break;
+ case PSR_MODULE_PARAM:
+ seq_puts(m, "disabled by flag");
+ break;
+ case PSR_CRTC_NOT_ACTIVE:
+ seq_puts(m, "crtc not active");
+ break;
+ case PSR_PWR_WELL_ENABLED:
+ seq_puts(m, "power well enabled");
+ break;
+ case PSR_NOT_TILED:
+ seq_puts(m, "not tiled");
+ break;
+ case PSR_SPRITE_ENABLED:
+ seq_puts(m, "sprite enabled");
+ break;
+ case PSR_S3D_ENABLED:
+ seq_puts(m, "stereo 3d enabled");
+ break;
+ case PSR_INTERLACED_ENABLED:
+ seq_puts(m, "interlaced enabled");
+ break;
+ case PSR_HSW_NOT_DDIA:
+ seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+ break;
+ default:
+ seq_puts(m, "unknown reason");
+ }
+ seq_puts(m, "\n");
+ return 0;
+ }
+
+ psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+ seq_puts(m, "PSR Current State: ");
+ switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+ case EDP_PSR_STATUS_STATE_IDLE:
+ seq_puts(m, "Reset state\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDONACK:
+ seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDENT:
+ seq_puts(m, "SRD entry\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFOFF:
+ seq_puts(m, "Wait for buffer turn off\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFON:
+ seq_puts(m, "Wait for buffer turn on\n");
+ break;
+ case EDP_PSR_STATUS_STATE_AUXACK:
+ seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDOFFACK:
+ seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_puts(m, "Link Status: ");
+ switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+ case EDP_PSR_STATUS_LINK_FULL_OFF:
+ seq_puts(m, "Link is fully off\n");
+ break;
+ case EDP_PSR_STATUS_LINK_FULL_ON:
+ seq_puts(m, "Link is fully on\n");
+ break;
+ case EDP_PSR_STATUS_LINK_STANDBY:
+ seq_puts(m, "Link is in standby\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "PSR Entry Count: %u\n",
+ psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+ EDP_PSR_STATUS_COUNT_MASK);
+
+ seq_printf(m, "Max Sleep Timer Counter: %u\n",
+ psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+ EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+ seq_printf(m, "Had AUX error: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+ seq_printf(m, "Sending AUX: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+ seq_printf(m, "Sending Idle: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+ seq_printf(m, "Sending TP2 TP3: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+ seq_printf(m, "Sending TP1: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+ seq_printf(m, "Idle Count: %u\n",
+ psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+ psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+ return 0;
+}
+
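Note that the field extraction above leans on C precedence: >> binds tighter than &, so the expressions parse as (reg >> SHIFT) & MASK with a right-aligned mask. Fully parenthesized, the entry-count read is equivalent to:

    count = (psrstat >> EDP_PSR_STATUS_COUNT_SHIFT) &
    	    EDP_PSR_STATUS_COUNT_MASK;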
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 power;
+ u32 units;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return -ENODEV;
+
+ rdmsrl(MSR_RAPL_POWER_UNIT, power);
+ power = (power & 0x1f00) >> 8;
+ units = 1000000 / (1 << power); /* convert to uJ */
+ power = I915_READ(MCH_SECP_NRG_STTS);
+ power *= units;
+
+ seq_printf(m, "%llu", (long long unsigned)power);
+
+ return 0;
+}
+
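For context on the arithmetic in i915_energy_uJ(): bits 12:8 of MSR_RAPL_POWER_UNIT hold the energy-status unit ESU, one counter tick is 1/2^ESU joules, and the code rescales that to microjoules with integer division. A worked example with ESU = 14, a common value but an assumption here:

    power = (msr & 0x1f00) >> 8;    /* ESU = 14                            */
    units = 1000000 / (1 << power); /* 1000000 / 16384 = 61 uJ per tick    */
    /* reported energy = counter * 61 uJ; truncation loses roughly 0.06%   */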
+static int i915_pc8_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev)) {
+ seq_puts(m, "not supported\n");
+ return 0;
+ }
+
+ mutex_lock(&dev_priv->pc8.lock);
+ seq_printf(m, "Requirements met: %s\n",
+ yesno(dev_priv->pc8.requirements_met));
+ seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+ seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+ seq_printf(m, "IRQs disabled: %s\n",
+ yesno(dev_priv->pc8.irqs_disabled));
+ seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
+ mutex_unlock(&dev_priv->pc8.lock);
+
+ return 0;
+}
+
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -2006,6 +1898,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm;
+ struct i915_vma *vma, *x;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,12 +1920,17 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
- if (obj->pin_count == 0) {
- ret = i915_gem_object_unbind(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(vma, x, &vm->inactive_list,
+ mm_list) {
+ if (vma->obj->pin_count)
+ continue;
+
+ ret = i915_vma_unbind(vma);
if (ret)
goto unlock;
}
+ }
}
if (val & DROP_UNBOUND) {
@@ -2326,6 +2225,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2353,64 +2253,42 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_dpio", i915_dpio_info, 0},
+ {"i915_llc", i915_llc, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_energy_uJ", i915_energy_uJ, 0},
+ {"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+static struct i915_debugfs_files {
+ const char *name;
+ const struct file_operations *fops;
+} i915_debugfs_files[] = {
+ {"i915_wedged", &i915_wedged_fops},
+ {"i915_max_freq", &i915_max_freq_fops},
+ {"i915_min_freq", &i915_min_freq_fops},
+ {"i915_cache_sharing", &i915_cache_sharing_fops},
+ {"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_gem_drop_caches", &i915_drop_caches_fops},
+ {"i915_error_state", &i915_error_state_fops},
+ {"i915_next_seqno", &i915_next_seqno_fops},
+};
+
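Folding the nine hand-rolled i915_debugfs_create() call sites into this table means both init and cleanup collapse into a loop over ARRAY_SIZE(i915_debugfs_files), as the hunks below show, and registering another file becomes a one-line change, e.g. (hypothetical entry):

    {"i915_demo_status", &i915_demo_status_fops},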
int i915_debugfs_init(struct drm_minor *minor)
{
- int ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_wedged",
- &i915_wedged_fops);
- if (ret)
- return ret;
+ int ret, i;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_max_freq",
- &i915_max_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_min_freq",
- &i915_min_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_cache_sharing",
- &i915_cache_sharing_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_ring_stop",
- &i915_ring_stop_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_gem_drop_caches",
- &i915_drop_caches_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_error_state",
- &i915_error_state_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_next_seqno",
- &i915_next_seqno_fops);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ i915_debugfs_files[i].name,
+ i915_debugfs_files[i].fops);
+ if (ret)
+ return ret;
+ }
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -2419,26 +2297,18 @@ int i915_debugfs_init(struct drm_minor *minor)
void i915_debugfs_cleanup(struct drm_minor *minor)
{
+ int i;
+
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
- 1, minor);
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
}
#endif /* CONFIG_DEBUG_FS */
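The cast from file_operations to drm_info_list in the cleanup loop above is less alarming than it looks: i915_debugfs_create() registers each file through drm_add_fake_info_node(), which stores the fops pointer as the node's info_ent, and drm_debugfs_remove_files() matches nodes by that same pointer. A sketch of the registration side, hedged against the exact helpers in this tree:

    /* drm_add_fake_info_node() (abridged): */
    node->minor = minor;
    node->dent = ent;
    node->info_ent = (void *)key; /* here: the fops pointer matched on removal */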
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f4669802a0f..fdaa0915ce5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(dev);
+ break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
@@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
if (ret)
goto cleanup_vga_client;
@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- if (INTEL_INFO(dev)->num_pipes == 0) {
- dev_priv->mm.suspended = 0;
+ if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
- }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
-
return 0;
cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1441,22 +1439,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
}
/**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
-/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
@@ -1497,15 +1479,31 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->backlight.lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+
i915_dump_device_info(dev_priv);
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1531,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- intel_early_sanitize_regs(dev);
+ intel_uncore_early_sanitize(dev);
+
+ if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ }
ret = i915_gem_gtt_init(dev);
if (ret)
@@ -1567,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
- aperture_size);
+ dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1595,8 +1603,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_irq_init(dev);
intel_pm_init(dev);
- intel_gt_sanitize(dev);
- intel_gt_init(dev);
+ intel_uncore_sanitize(dev);
+ intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1631,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- /* Start out suspended */
- dev_priv->mm.suspended = 1;
-
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
@@ -1643,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
}
+ } else {
+ /* Start out suspended in ums mode. */
+ dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1669,9 +1677,9 @@ out_gem_unload:
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
@@ -1688,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)
intel_gpu_ips_teardown();
- if (HAS_POWER_WELL(dev))
+ if (HAS_POWER_WELL(dev)) {
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_set_power_well(dev, true);
i915_remove_power_well(dev);
+ }
i915_teardown_sysfs(dev);
@@ -1707,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
acpi_video_unregister();
@@ -1735,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
+ cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1756,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ list_del(&dev_priv->gtt.base.global_link);
+ WARN_ON(!list_empty(&dev_priv->vm_list));
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1766,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@@ -1842,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -1862,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
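DRM_RENDER_ALLOW tags the ioctls that remain reachable from render nodes; everything else is refused for render clients (the driver opts in via DRIVER_RENDER further down). A simplified sketch of the gate the DRM core applies, hedged against the exact in-tree wording:

    /* In drm_ioctl() (simplified): */
    if (drm_is_render_client(file_priv) &&
        !(ioctl->flags & DRM_RENDER_ALLOW))
    	return -EACCES;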
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 45b3c030f48..ccb28ead350 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
-unsigned int i915_preliminary_hw_support __read_mostly = 0;
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support. (default: false)");
+ "Enable preliminary hardware support.");
int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -132,6 +136,24 @@ int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+ "(default: false)");
+
+int i915_enable_pc8 __read_mostly = 1;
+module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
+
+int i915_pc8_timeout __read_mostly = 5000;
+module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+ "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -543,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
+ /* We do a lot of poking in a lot of registers, make sure they work
+ * properly. */
+ hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
drm_kms_helper_poll_disable(dev);
@@ -551,7 +576,11 @@ static int i915_drm_freeze(struct drm_device *dev)
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error = i915_gem_idle(dev);
+ int error;
+
+ mutex_lock(&dev->struct_mutex);
+ error = i915_gem_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@@ -656,7 +685,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
@@ -696,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
schedule_work(&dev_priv->console_resume_work);
}
+ /* Undo what we did at i915_drm_freeze so the refcount goes back to the
+ * expected level. */
+ hsw_enable_package_c8(dev_priv);
+
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -706,7 +738,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -732,7 +764,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +785,6 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_I85X(dev))
- return -ENODEV;
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- if (IS_I830(dev) || IS_845G(dev)) {
- I915_WRITE(DEBUG_RESET_I830,
- DEBUG_RESET_DISPLAY |
- DEBUG_RESET_RENDER |
- DEBUG_RESET_FULL);
- POSTING_READ(DEBUG_RESET_I830);
- msleep(1);
-
- I915_WRITE(DEBUG_RESET_I830, 0);
- POSTING_READ(DEBUG_RESET_I830);
- }
-
- msleep(1);
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- return 0;
-}
-
-static int i965_reset_complete(struct drm_device *dev)
-{
- u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int i965_do_reset(struct drm_device *dev)
-{
- int ret;
- u8 gdrst;
-
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_RENDER |
- GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_MEDIA |
- GRDOM_RESET_ENABLE);
-
- return wait_for(i965_reset_complete(dev), 500);
-}
-
-static int ironlake_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst;
- int ret;
-
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-}
-
-static int gen6_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
- unsigned long irqflags;
-
- /* Hold gt_lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
- /* Reset the chip */
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->forcewake_count)
- dev_priv->gt.force_wake_get(dev_priv);
- else
- dev_priv->gt.force_wake_put(dev_priv);
-
- /* Restore fifo count */
- dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
- return ret;
-}
-
-int intel_gpu_reset(struct drm_device *dev)
-{
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4: return i965_do_reset(dev);
- case 2: return i8xx_do_reset(dev);
- default: return -ENODEV;
- }
-}
-
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -955,11 +854,11 @@ int i915_reset(struct drm_device *dev)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
+ !dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
i915_gem_init_swizzling(dev);
@@ -1110,7 +1009,6 @@ static const struct file_operations i915_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
@@ -1123,8 +1021,9 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -1154,7 +1053,7 @@ static struct drm_driver driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = i915_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
@@ -1215,136 +1114,3 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
- /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
- * the chip from rc6 before touching it for real. MI_MODE is masked,
- * hence harmless to write 0 into. */
- I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
- unsigned long irqflags; \
- u##x val = 0; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_get(dev_priv); \
- val = read##y(dev_priv->regs + reg); \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_put(dev_priv); \
- } else { \
- val = read##y(dev_priv->regs + reg); \
- } \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val)); \
- return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- unsigned long irqflags; \
- u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
- write##y(val, dev_priv->regs + reg); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- hsw_unclaimed_reg_check(dev_priv, reg); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
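The removed register accessors (they reappear in intel_uncore.c) are stamped out per width by token pasting: x is the bit width and y the mmio suffix, so __i915_read(32, l) expands to i915_read32() built on readl(). A minimal standalone sketch of the idiom:

    #define DEFINE_MMIO_READ(x, y) \
    static u##x demo_read##x(void __iomem *base, u32 reg) \
    { \
    	return read##y(base + reg); \
    }

    DEFINE_MMIO_READ(8, b)  /* demo_read8()  -> readb() */
    DEFINE_MMIO_READ(16, w) /* demo_read16() -> readw() */
    DEFINE_MMIO_READ(32, l) /* demo_read32() -> readl() */
    #undef DEFINE_MMIO_READ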
-
-static const struct register_whitelist {
- uint64_t offset;
- uint32_t size;
- uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
-} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
-};
-
-int i915_reg_read_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reg_read *reg = data;
- struct register_whitelist const *entry = whitelist;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
- break;
- }
-
- if (i == ARRAY_SIZE(whitelist))
- return -EINVAL;
-
- switch (entry->size) {
- case 8:
- reg->val = I915_READ64(reg->offset);
- break;
- case 4:
- reg->val = I915_READ(reg->offset);
- break;
- case 2:
- reg->val = I915_READ16(reg->offset);
- break;
- case 1:
- reg->val = I915_READ8(reg->offset);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1929bffc1c7..52a3785a3fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
uint32_t dpll;
+ uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
};
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*disable)(struct drm_i915_private *dev_priv,
@@ -198,7 +201,6 @@ struct intel_ddi_plls {
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
@@ -320,8 +322,8 @@ struct drm_i915_error_state {
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
- } *active_bo, *pinned_bo;
- u32 active_bo_count, pinned_bo_count;
+ } **active_bo, **pinned_bo;
+ u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
@@ -356,14 +358,16 @@ struct drm_i915_display_funcs {
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
- void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ void (*update_sprite_wm)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable);
+ bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
+ void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -376,7 +380,8 @@ struct drm_i915_display_funcs {
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj);
+ struct drm_i915_gem_object *obj,
+ uint32_t flags);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
@@ -387,11 +392,20 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
-struct drm_i915_gt_funcs {
+struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+struct intel_uncore {
+ spinlock_t lock; /** lock is also taken in irq contexts. */
+
+ struct intel_uncore_funcs funcs;
+
+ unsigned fifo_count;
+ unsigned forcewake_count;
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
@@ -436,12 +450,64 @@ struct intel_device_info {
enum i915_cache_level {
I915_CACHE_NONE = 0,
- I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+ I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+ I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
+ caches, eg sampler/render caches, and the
+ large Last-Level-Cache. LLC is coherent with
+ the CPU, but L3 is only visible to the GPU. */
+ I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
typedef uint32_t gen6_gtt_pte_t;
+struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+ unsigned long start; /* Start offset always 0 for dri2 */
+ size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+ struct page *page;
+ } scratch;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* FIXME: Need a more generic return type */
+ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level);
+ void (*clear_range)(struct i915_address_space *vm,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_address_space *vm);
+};
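
The new i915_address_space pulls the PTE operations out of the global-GTT struct so that per-process spaces can share one interface. A minimal userspace model of the clear_range contract (all toy_* names and the array backing store are illustrative, not the kernel implementation): every entry in the range is pointed back at the scratch page so no stale translation survives an unbind.

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_PTES 16

	typedef uint32_t toy_pte_t;

	struct toy_address_space {
		toy_pte_t ptes[NUM_PTES];
		toy_pte_t scratch_pte;	/* stands in for vm->scratch */
	};

	/* Point every PTE in [first_entry, first_entry + num_entries) at
	 * the scratch page, mirroring what the vm->clear_range hook is
	 * expected to achieve. */
	static void toy_clear_range(struct toy_address_space *vm,
				    unsigned int first_entry,
				    unsigned int num_entries)
	{
		unsigned int i;

		for (i = first_entry; i < first_entry + num_entries; i++)
			vm->ptes[i] = vm->scratch_pte;
	}

	int main(void)
	{
		struct toy_address_space vm = { .scratch_pte = 0xdeadbeef };

		toy_clear_range(&vm, 4, 8);
		/* pte[4] is scratch, pte[12] is just past the range */
		printf("pte[4]=0x%x pte[12]=0x%x\n", vm.ptes[4], vm.ptes[12]);
		return 0;
	}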
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +516,7 @@ typedef uint32_t gen6_gtt_pte_t;
* the spec.
*/
struct i915_gtt {
- unsigned long start; /* Start offset of used GTT */
- size_t total; /* Total size GTT can map */
+ struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
@@ -462,50 +527,47 @@ struct i915_gtt {
void __iomem *gsm;
bool do_idle_maps;
- dma_addr_t scratch_page_dma;
- struct page *scratch_page;
+
+ int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
- void (*gtt_remove)(struct drm_device *dev);
- void (*gtt_clear_range)(struct drm_device *dev,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*gtt_insert_entries)(struct drm_device *dev,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
};
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
- struct drm_device *dev;
+ struct i915_address_space base;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
- dma_addr_t scratch_page_dma_addr;
- /* pte functions, mirroring the interface of the global gtt. */
- void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
int (*enable)(struct drm_device *dev);
- void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
};
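
As the comment above spells out, a VMA only exists while its object is bound into some address space, so no separate refcount is needed. A hypothetical sketch of the lookup-or-create pattern this header later declares as i915_gem_obj_lookup_or_create_vma() (simplified: plain calloc, no locking, toy types):

	#include <stdlib.h>

	struct toy_vm;			/* stands in for i915_address_space */
	struct toy_obj;

	struct toy_vma {
		struct toy_vma *next;	/* stands in for obj->vma_list */
		struct toy_obj *obj;
		struct toy_vm *vm;
	};

	struct toy_obj {
		struct toy_vma *vma_list;
	};

	/* Return the existing binding of obj into vm, or create one; the
	 * VMA is freed again on unbind, so it never outlives the object. */
	static struct toy_vma *lookup_or_create_vma(struct toy_obj *obj,
						    struct toy_vm *vm)
	{
		struct toy_vma *vma;

		for (vma = obj->vma_list; vma; vma = vma->next)
			if (vma->vm == vm)
				return vma;

		vma = calloc(1, sizeof(*vma));
		if (!vma)
			return NULL;
		vma->obj = obj;
		vma->vm = vm;
		vma->next = obj->vma_list;
		obj->vma_list = vma;
		return vma;
	}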
struct i915_ctx_hang_stats {
@@ -528,15 +590,48 @@ struct i915_hw_context {
struct i915_ctx_hang_stats hang_stats;
};
-enum no_fbc_reason {
- FBC_NO_OUTPUT, /* no outputs enabled to compress */
- FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
- FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
- FBC_MODE_TOO_LARGE, /* mode too large for compression */
- FBC_BAD_PLANE, /* fbc not supported on plane */
- FBC_NOT_TILED, /* buffer not tiled */
- FBC_MULTIPLE_PIPES, /* more than one pipe active */
- FBC_MODULE_PARAM,
+struct i915_fbc {
+ unsigned long size;
+ unsigned int fb_id;
+ enum plane plane;
+ int y;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+ } *fbc_work;
+
+ enum no_fbc_reason {
+ FBC_OK, /* FBC is enabled */
+ FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+ FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ } no_fbc_reason;
+};
+
+enum no_psr_reason {
+ PSR_NO_SOURCE, /* Not supported on platform */
+ PSR_NO_SINK, /* Not supported by panel */
+ PSR_MODULE_PARAM,
+ PSR_CRTC_NOT_ACTIVE,
+ PSR_PWR_WELL_ENABLED,
+ PSR_NOT_TILED,
+ PSR_SPRITE_ENABLED,
+ PSR_S3D_ENABLED,
+ PSR_INTERLACED_ENABLED,
+ PSR_HSW_NOT_DDIA,
};
enum intel_pch {
@@ -722,12 +817,12 @@ struct i915_suspend_saved_registers {
};
struct intel_gen6_power_mgmt {
+ /* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
- struct delayed_work vlv_work;
u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
+
+ /* On vlv we need to manually drop to Vmin with a delayed work. */
+ struct delayed_work vlv_work;
/* The below variables and all the rps hw state are protected by
* dev->struct mutex. */
@@ -793,6 +888,18 @@ struct i915_dri1_state {
uint32_t counter;
};
+struct i915_ums_state {
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int mm_suspended;
+};
+
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
@@ -801,8 +908,6 @@ struct intel_l3_parity {
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -816,37 +921,12 @@ struct i915_gem_mm {
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
- int gtt_mtrr;
-
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -865,16 +945,6 @@ struct i915_gem_mm {
*/
bool interruptible;
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -884,6 +954,7 @@ struct i915_gem_mm {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
/* accounting, useful for userland debugging */
+ spinlock_t object_stat_lock;
size_t object_memory;
u32 object_count;
};
@@ -897,6 +968,11 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -988,6 +1064,88 @@ struct intel_vbt_data {
struct child_device_config *child_dev;
};
+enum intel_ddb_partitioning {
+ INTEL_DDB_PART_1_2,
+ INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct intel_wm_level {
+ bool enable;
+ uint32_t pri_val;
+ uint32_t spr_val;
+ uint32_t cur_val;
+ uint32_t fbc_val;
+};
+
+/*
+ * This struct tracks the state needed for the Package C8+ feature.
+ *
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states.
+ *
+ * Our driver only allows PC8+ when all the outputs are disabled, the power well
+ * is disabled and the GPU is idle. When these conditions are met, we manually
+ * take the remaining steps: disable the interrupts and clocks, and switch the
+ * LCPLL refclk to Fclk.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6.
+ *
+ * The interrupt disabling is part of the requirements. We can only leave the
+ * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
+ * can lock the machine.
+ *
+ * Ideally every piece of our code that needs PC8+ disabled would call
+ * hsw_disable_package_c8, which would increment disable_count and prevent the
+ * system from reaching PC8+. But we don't have a symmetric way to do this for
+ * everything, so we have the requirements_met and gpu_idle variables. When we
+ * switch requirements_met or gpu_idle to true we decrease disable_count, and
+ * increase it in the opposite case. The requirements_met variable is true when
+ * all the CRTCs, encoders and the power well are disabled. The gpu_idle
+ * variable is true when the GPU is idle.
+ *
+ * On top of all this, we only actually enable PC8+ if disable_count
+ * stays at zero for at least a few seconds. This is implemented with the
+ * enable_work delayed work. We do this so we don't enable/disable PC8 dozens
+ * of consecutive times when all screens are disabled and some background app
+ * queries the state of our connectors, or some application constantly wakes
+ * up to use the GPU. Only after the enable_work function actually enables
+ * PC8+ does the "enabled" variable become true, which means it can still be
+ * false even when disable_count is 0.
+ *
+ * The irqs_disabled variable becomes true exactly after we disable the IRQs and
+ * goes back to false exactly before we reenable them. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen; if it does we print an error, and we
+ * also update the values saved in struct regsave so that when we restore the
+ * IRQs they contain the latest expected values.
+ *
+ * For more, read "Display Sequences for Package C8" in our documentation.
+ */
+struct i915_package_c8 {
+ bool requirements_met;
+ bool gpu_idle;
+ bool irqs_disabled;
+ /* Only true after the delayed work task actually enables it. */
+ bool enabled;
+ int disable_count;
+ struct mutex lock;
+ struct delayed_work enable_work;
+
+ struct {
+ uint32_t deimr;
+ uint32_t sdeimr;
+ uint32_t gtimr;
+ uint32_t gtier;
+ uint32_t gen6_pmimr;
+ } regsave;
+};
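
A toy model of the refcount discipline the comment describes (hypothetical names; the real driver additionally defers the enable through enable_work and serializes with the mutex): whoever needs PC8+ blocked bumps disable_count, and only a count of zero lets the feature turn on.

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_pc8 {
		int disable_count;
		bool enabled;
	};

	static void toy_disable_package_c8(struct toy_pc8 *pc8)
	{
		pc8->disable_count++;
		pc8->enabled = false;	/* leaving PC8+ is immediate */
	}

	static void toy_enable_package_c8(struct toy_pc8 *pc8)
	{
		if (--pc8->disable_count == 0) {
			/* the driver waits out a delayed work before this
			 * flip, so "enabled" can lag a zero disable_count */
			pc8->enabled = true;
		}
	}

	int main(void)
	{
		struct toy_pc8 pc8 = { .disable_count = 1 };	/* outputs active */

		toy_enable_package_c8(&pc8);	/* requirements now met */
		printf("enabled=%d\n", pc8.enabled);
		toy_disable_package_c8(&pc8);	/* GPU busy again */
		printf("enabled=%d\n", pc8.enabled);
		return 0;
	}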
+
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -998,14 +1156,7 @@ typedef struct drm_i915_private {
void __iomem *regs;
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- spinlock_t gt_lock;
+ struct intel_uncore uncore;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1042,6 +1193,7 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
u32 gt_irq_mask;
+ u32 pm_irq_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
@@ -1059,12 +1211,7 @@ typedef struct drm_i915_private {
int num_plane;
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
+ struct i915_fbc fbc;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1081,8 +1228,6 @@ typedef struct drm_i915_private {
} backlight;
/* LVDS info */
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
bool no_aux_handshake;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1250,8 @@ typedef struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
- struct i915_gtt gtt;
+ struct list_head vm_list; /* Global list of all address spaces */
+ struct i915_gtt gtt; /* VMA representing the global address space */
struct i915_gem_mm mm;
@@ -1132,6 +1278,9 @@ typedef struct drm_i915_private {
struct intel_l3_parity l3_parity;
+ /* Cannot be determined by PCIID. You must always read a register. */
+ size_t ellc_size;
+
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1142,10 +1291,7 @@ typedef struct drm_i915_private {
/* Haswell power well */
struct i915_power_well power_well;
- enum no_fbc_reason no_fbc_reason;
-
- struct drm_mm_node *compressed_fb;
- struct drm_mm_node *compressed_llb;
+ enum no_psr_reason no_psr_reason;
struct i915_gpu_error gpu_error;
@@ -1170,11 +1316,34 @@ typedef struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
+ struct {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ uint16_t pri_latency[5];
+ /* sprite */
+ uint16_t spr_latency[5];
+ /* cursor */
+ uint16_t cur_latency[5];
+ } wm;
+
+ struct i915_package_c8 pc8;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
+ /* Old ums support infrastructure, same warning applies. */
+ struct i915_ums_state ums;
} drm_i915_private_t;
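
A small illustration of the units documented for the new wm block above (hypothetical helper, not driver code): raw WM0 latencies are stored in 0.1 microsecond steps and WM1+ in 0.5 microsecond steps, so converting a raw value to nanoseconds depends on the watermark level.

	#include <stdint.h>
	#include <stdio.h>

	/* Raw WM0 values are in 0.1us (100ns) units, WM1+ in 0.5us (500ns). */
	static unsigned int wm_latency_ns(const uint16_t latency[5], int level)
	{
		return latency[level] * (level == 0 ? 100 : 500);
	}

	int main(void)
	{
		uint16_t pri_latency[5] = { 2, 4, 12, 0, 0 };

		printf("WM0: %u ns\n", wm_latency_ns(pri_latency, 0)); /* 200 */
		printf("WM1: %u ns\n", wm_latency_ns(pri_latency, 1)); /* 2000 */
		return 0;
	}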
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1187,7 +1356,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -1212,15 +1381,16 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
- /** This object's place on the active/inactive lists */
struct list_head ring_list;
- struct list_head mm_list;
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -1287,6 +1457,7 @@ struct drm_i915_gem_object {
*/
unsigned int fault_mappable:1;
unsigned int pin_mappable:1;
+ unsigned int pin_display:1;
/*
* Is the GPU currently using a fence to access this buffer,
@@ -1294,7 +1465,7 @@ struct drm_i915_gem_object {
unsigned int pending_fenced_gpu_access:1;
unsigned int fenced_gpu_access:1;
- unsigned int cache_level:2;
+ unsigned int cache_level:3;
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
@@ -1314,13 +1485,6 @@ struct drm_i915_gem_object {
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t gtt_offset;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@@ -1396,7 +1560,7 @@ struct drm_i915_file_private {
struct i915_ctx_hang_stats hang_stats;
};
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+#define INTEL_INFO(dev) (to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
@@ -1414,7 +1578,6 @@ struct drm_i915_file_private {
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
@@ -1426,6 +1589,8 @@ struct drm_i915_file_private {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
@@ -1446,6 +1611,7 @@ struct drm_i915_file_private {
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -1468,8 +1634,6 @@ struct drm_i915_file_private {
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1477,8 +1641,6 @@ struct drm_i915_file_private {
#define HAS_IPS(dev) (IS_ULT(dev))
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
@@ -1490,7 +1652,7 @@ struct drm_i915_file_private {
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1526,7 +1688,7 @@ struct drm_i915_file_private {
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
@@ -1540,9 +1702,14 @@ extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern int i915_enable_pc8 __read_mostly;
+extern int i915_pc8_timeout __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1578,16 +1745,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
-void i915_hangcheck_elapsed(unsigned long data);
+void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_sanitize(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
-void i915_error_state_free(struct kref *error_ref);
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_clear_errors(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1595,13 +1765,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1658,13 +1821,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1701,8 +1869,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
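
The body of the comparison sits outside this hunk, but the usual wraparound-safe form of what this comment describes casts the difference to a signed type, so the ordering stays correct across a u32 overflow. A self-contained sketch:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* True if seq1 is later than (or equal to) seq2, even when the
	 * 32-bit counter has wrapped in between. */
	static inline bool toy_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(toy_seqno_passed(10, 5));
		/* 3 is "later" than 0xfffffffe once the counter wraps */
		assert(toy_seqno_passed(3, 0xfffffffeu));
		assert(!toy_seqno_passed(0xfffffffeu, 3));
		return 0;
	}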
@@ -1754,10 +1920,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
}
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1784,6 +1947,7 @@ int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
@@ -1810,6 +1974,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+/* Some GGTT VM helpers */
+#define obj_to_ggtt(obj) \
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool i915_is_ggtt(struct i915_address_space *vm)
+{
+ struct i915_address_space *ggtt =
+ &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+ return vm == ggtt;
+}
+
+static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
+ map_and_fenceable, nonblocking);
+}
+#undef obj_to_ggtt
+
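The wrappers above exist so most call sites shrink rather than grow when the core pin API gains an address-space argument; the i915_gem.c hunks below perform exactly this conversion. The shape of the change at a call site:

	/* before: pinning was implicitly a pin into the global GTT */
	ret = i915_gem_object_pin(obj, 0, true, false);

	/* after: the GGTT wrapper supplies obj_to_ggtt(obj) to the
	 * vm-aware i915_gem_object_pin() */
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);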
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
@@ -1828,7 +2042,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
}
struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1862,7 +2076,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
@@ -1884,7 +2100,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
-inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -1897,23 +2113,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
- int handle);
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+ const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+ size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+ struct drm_i915_error_state_buf *eb)
+{
+ kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
@@ -1993,7 +2222,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */
-#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
@@ -2002,7 +2230,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-#endif
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
@@ -2010,7 +2237,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
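
A toy model of the get/put counting that the new intel_uncore struct owns (hypothetical, with no spinlock or hardware access): only the 0-to-1 transition sets the wake bit, and only the 1-to-0 transition releases it, so nested users are cheap.

	#include <stdio.h>

	struct toy_uncore {
		unsigned forcewake_count;
		int gt_awake;		/* stands in for the hardware wake bit */
	};

	static void toy_force_wake_get(struct toy_uncore *uncore)
	{
		if (uncore->forcewake_count++ == 0)
			uncore->gt_awake = 1;	/* first user wakes the GT */
	}

	static void toy_force_wake_put(struct toy_uncore *uncore)
	{
		if (--uncore->forcewake_count == 0)
			uncore->gt_awake = 0;	/* last user lets it sleep */
	}

	int main(void)
	{
		struct toy_uncore uncore = { 0, 0 };

		toy_force_wake_get(&uncore);
		toy_force_wake_get(&uncore);	/* nested get: no extra wake */
		toy_force_wake_put(&uncore);
		printf("awake=%d count=%u\n", uncore.gt_awake, uncore.forcewake_count);
		toy_force_wake_put(&uncore);
		printf("awake=%d count=%u\n", uncore.gt_awake, uncore.forcewake_count);
		return 0;
	}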
@@ -2029,39 +2255,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
-#define __i915_read(x, y) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+#define __i915_read(x) \
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
#undef __i915_read
-#define __i915_write(x, y) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+#define __i915_write(x) \
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
#undef __i915_write
-#define I915_READ8(reg) i915_read8(dev_priv, (reg))
-#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
-#define I915_READ16(reg) i915_read16(dev_priv, (reg))
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
-#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-#define I915_READ(reg) i915_read32(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
-#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
-#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
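
With this change the NOTRACE variants stop being raw readl/writel wrappers and become the same accessor with tracing suppressed, so forcewake handling applies to both paths. A compilable sketch of the bool-trace pattern (toy register file, not the i915 functions):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t toy_regs[64];

	static uint32_t toy_read32(uint32_t reg, bool trace)
	{
		uint32_t val = toy_regs[reg / 4];	/* common access path */

		if (trace)
			printf("reg_rw: read  0x%03x -> 0x%08x\n", reg, val);
		return val;
	}

	static void toy_write32(uint32_t reg, uint32_t val, bool trace)
	{
		if (trace)
			printf("reg_rw: write 0x%03x <- 0x%08x\n", reg, val);
		toy_regs[reg / 4] = val;	/* same path, traced or not */
	}

	#define TOY_READ(reg)		toy_read32((reg), true)
	#define TOY_READ_NOTRACE(reg)	toy_read32((reg), false)

	int main(void)
	{
		toy_write32(0x40, 0x12345678, true);
		(void)TOY_READ(0x40);
		(void)TOY_READ_NOTRACE(0x40);	/* no trace line emitted */
		return 0;
	}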
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e2208cfe9..2d1cb10d846 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -37,11 +38,14 @@
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force);
+static __must_check int
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+ enum i915_cache_level level)
+{
+ return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return true;
+
+ return obj->pin_display;
+}
+
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
@@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
dev_priv->mm.object_memory += size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
dev_priv->mm.object_memory -= size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
@@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active;
+ return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
@@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
- pinned += obj->gtt_space->size;
+ pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.total;
+ args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file,
return -ENOMEM;
ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- i915_gem_object_free(obj);
- return ret;
- }
-
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(&obj->base);
- trace_i915_gem_object_create(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
*handle_p = handle;
return 0;
@@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int i915_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* Creates a new mm object and returns a handle to it.
*/
@@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
- if (obj->cache_level == I915_CACHE_NONE)
- needs_clflush = 1;
- if (obj->gtt_space) {
+ needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
- if (!prefaulted) {
+ if (likely(!i915_prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true, true);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = obj->gtt_offset + args->offset;
+ offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
- if (obj->cache_level == I915_CACHE_NONE)
- needs_clflush_after = 1;
- if (obj->gtt_space) {
+ needs_clflush_after = cpu_write_needs_clflush(obj);
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
}
}
- /* Same trick applies for invalidate partially written cachelines before
- * writing. */
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
- && obj->cache_level == I915_CACHE_NONE)
- needs_clflush_before = 1;
+ /* Same trick applies to invalidate partially written cachelines read
+ * before writing. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ needs_clflush_before =
+ !cpu_cache_is_coherent(dev, obj->cache_level);
ret = i915_gem_object_get_pages(obj);
if (ret)
@@ -828,8 +835,8 @@ out:
*/
if (!needs_clflush_after &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(dev);
+ if (i915_gem_clflush_object(obj, obj->pin_display))
+ i915_gem_chipset_flush(dev);
}
}
@@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ args->size);
+ if (ret)
+ return -EFAULT;
+ }
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->cache_level == I915_CACHE_NONE &&
- obj->tiling_mode == I915_TILING_NONE &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ if (obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
@@ -990,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
bool wait_forever = true;
int ret;
+ WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
+
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
@@ -1255,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
/* Pinned buffers may be scanout, so flush the cache */
- if (obj->pin_count)
- i915_gem_object_flush_cpu_write_domain(obj);
+ if (obj->pin_display)
+ i915_gem_object_flush_cpu_write_domain(obj, true);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1346,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1360,8 +1371,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
- page_offset;
+ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn >>= PAGE_SHIFT;
+ pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1425,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- if (obj->base.dev->dev_mapping)
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
-
+ drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
obj->fault_mappable = false;
}
@@ -1485,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->base.map_list.map)
+ if (drm_vma_node_has_offset(&obj->base.vma_node))
return 0;
dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1524,6 @@ out:
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- if (!obj->base.map_list.map)
- return;
-
drm_gem_free_mmap_offset(&obj->base);
}
@@ -1557,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
goto out;
- *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
drm_gem_object_unreference(&obj->base);
@@ -1632,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
* hope for the best.
*/
WARN_ON(ret != -EIO);
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -1667,11 +1672,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(obj->gtt_space);
-
if (obj->pages_pin_count)
return -EBUSY;
+ BUG_ON(i915_gem_obj_bound_any(obj));
+
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
@@ -1704,12 +1709,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_unbind(obj) == 0 &&
- i915_gem_object_put_pages(obj) == 0) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+ global_list) {
+ struct i915_vma *vma, *v;
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (!i915_gem_object_put_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1892,8 +1903,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
obj->active = 1;
}
- /* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@@ -1915,13 +1924,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2085,11 +2095,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
- if (!dev_priv->mm.suspended) {
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ if (!dev_priv->ums.mm_suspended) {
+ i915_queue_hangcheck(ring->dev);
+
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2119,10 +2127,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
+ if (acthd >= i915_gem_obj_offset(obj, vm) &&
+ acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
return false;
@@ -2145,6 +2154,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
return false;
}
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+ struct i915_address_space *vm;
+
+ vm = &dev_priv->gtt.base;
+
+ return vm;
+}
+
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
@@ -2152,9 +2172,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
* pointing inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
-
if (request->batch_obj) {
- if (i915_head_inside_object(acthd, request->batch_obj)) {
+ if (i915_head_inside_object(acthd, request->batch_obj,
+ request_to_vm(request))) {
*inside = true;
return true;
}
@@ -2174,17 +2194,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
+ unsigned long offset = 0;
/* Innocent until proven guilty */
guilty = false;
- if (ring->hangcheck.action != wait &&
+ if (request->batch_obj)
+ offset = i915_gem_obj_offset(request->batch_obj,
+ request_to_vm(request));
+
+ if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
- request->batch_obj ?
- request->batch_obj->gtt_offset : 0,
+ offset,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2275,23 +2299,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Move everything out of the GPU domains to ensure we do any
- * necessary invalidation upon reuse.
- */
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- {
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- }
-
i915_gem_restore_fences(dev);
}
@@ -2400,7 +2413,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
idle &= list_empty(&ring->request_list);
}
- if (!dev_priv->mm.suspended && !idle)
+ if (!dev_priv->ums.mm_suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
@@ -2586,18 +2599,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_vma_unbind(struct i915_vma *vma)
{
+ struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->gtt_space == NULL)
+ if (list_empty(&vma->vma_link))
return 0;
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
if (obj->pin_count)
return -EBUSY;
@@ -2618,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- trace_i915_gem_object_unbind(obj);
+ trace_i915_vma_unbind(vma);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@@ -2629,18 +2642,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
- list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
- obj->map_and_fenceable = true;
+ if (i915_is_ggtt(vma->vm))
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- obj->gtt_offset = 0;
+ drm_mm_remove_node(&vma->node);
+
+destroy:
+ i915_gem_vma_destroy(vma);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ * NB: Until we have real VMAs there will only ever be one */
+ WARN_ON(!list_empty(&obj->vma_list));
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
+/**
+ * Unbinds an object from the global GTT aperture.
+ */
+int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
+
+ if (!i915_gem_obj_ggtt_bound(obj))
+ return 0;
+
+ if (obj->pin_count)
+ return -EBUSY;
+
+ BUG_ON(obj->pages == NULL);
+
+ return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
+}
+
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2691,12 +2732,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(fence_reg);
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2761,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2739,7 +2780,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2805,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
- WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
@@ -2997,7 +3038,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
if (HAS_LLC(dev))
return true;
- if (gtt_space == NULL)
+ if (!drm_mm_node_allocated(gtt_space))
return true;
if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3071,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
@@ -3042,8 +3083,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
@@ -3058,18 +3099,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking)
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- bool mappable, fenceable;
- size_t gtt_max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+ size_t gtt_max =
+ map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+ struct i915_vma *vma;
int ret;
fence_size = i915_gem_get_gtt_size(dev,
@@ -3110,77 +3151,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL) {
- i915_gem_object_unpin_pages(obj);
- return -ENOMEM;
+ BUG_ON(!i915_is_ggtt(vm));
+
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
}
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_is_singular(&obj->vma_list));
+
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
- obj->cache_level, 0, gtt_max);
+ obj->cache_level, 0, gtt_max,
+ DRM_MM_SEARCH_DEFAULT);
if (ret) {
- ret = i915_gem_evict_something(dev, size, alignment,
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
if (ret == 0)
goto search_free;
- i915_gem_object_unpin_pages(obj);
- kfree(node);
- return ret;
+ goto err_free_vma;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return -EINVAL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+ obj->cache_level))) {
+ ret = -EINVAL;
+ goto err_remove_node;
}
ret = i915_gem_gtt_prepare_object(obj);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return ret;
- }
+ if (ret)
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&vma->mm_list, &vm->inactive_list);
- obj->gtt_space = node;
- obj->gtt_offset = node->start;
+ if (i915_is_ggtt(vm)) {
+ bool mappable, fenceable;
- fenceable =
- node->size == fence_size &&
- (node->start & (fence_alignment - 1)) == 0;
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
- mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+ }
- obj->map_and_fenceable = mappable && fenceable;
+ WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
- trace_i915_gem_object_bind(obj, map_and_fenceable);
+ trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
+
+err_remove_node:
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ i915_gem_vma_destroy(vma);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
}
-void
-i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (obj->pages == NULL)
- return;
+ return false;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
- return;
+ return false;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3190,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (obj->cache_level != I915_CACHE_NONE)
- return;
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return false;
trace_i915_gem_object_clflush(obj);
-
drm_clflush_sg(obj->pages);
+
+ return true;
}
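Returning bool from i915_gem_clflush_object() lets callers batch the
chipset flush instead of issuing it unconditionally; the execbuffer hunk
later in this patch uses exactly that pattern, roughly:

	bool flush_chipset = false;

	list_for_each_entry(obj, objects, exec_list)
		flush_chipset |= i915_gem_clflush_object(obj, false);

	if (flush_chipset)
		i915_gem_chipset_flush(dev);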
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3227,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force)
{
uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(obj->base.dev);
+ if (i915_gem_clflush_object(obj, force))
+ i915_gem_chipset_flush(obj->base.dev);
+
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3258,7 +3314,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3268,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_cpu_write_domain(obj);
+ i915_gem_object_flush_cpu_write_domain(obj, false);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3296,8 +3352,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ if (i915_gem_object_is_inactive(obj)) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (vma)
+ list_move_tail(&vma->mm_list,
+ &dev_priv->gtt.base.inactive_list);
+	}
return 0;
}
@@ -3307,6 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma;
int ret;
if (obj->cache_level == cache_level)
@@ -3317,13 +3380,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ break;
+ }
}
- if (obj->gtt_space) {
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3345,11 +3412,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
-
- obj->gtt_space->color = cache_level;
}
- if (cache_level == I915_CACHE_NONE) {
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ vma->node.color = cache_level;
+ obj->cache_level = cache_level;
+
+ if (cpu_write_needs_clflush(obj)) {
u32 old_read_domains, old_write_domain;
/* If we're coming from LLC cached, then we haven't
@@ -3359,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Just set it to the CPU cache for now.
*/
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
- WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3372,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
- obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
}
@@ -3394,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- args->caching = obj->cache_level != I915_CACHE_NONE;
+ switch (obj->cache_level) {
+ case I915_CACHE_LLC:
+ case I915_CACHE_L3_LLC:
+ args->caching = I915_CACHING_CACHED;
+ break;
+
+ case I915_CACHE_WT:
+ args->caching = I915_CACHING_DISPLAY;
+ break;
+
+ default:
+ args->caching = I915_CACHING_NONE;
+ break;
+ }
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3417,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
+ case I915_CACHING_DISPLAY:
+ level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
default:
return -EINVAL;
}
@@ -3439,6 +3522,22 @@ unlock:
return ret;
}
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+ /* There are 3 sources that pin objects:
+ * 1. The display engine (scanouts, sprites, cursors);
+ * 2. Reservations for execbuffer;
+ * 3. The user.
+ *
+ * We can ignore reservations as we hold the struct_mutex and
+ * are only called outside of the reservation path. The user
+ * can only increment pin_count once, and so if after
+ * subtracting the potential reference by the user, any pin_count
+ * remains, it must be due to another use by the display engine.
+ */
+ return obj->pin_count - !!obj->user_pin_count;
+}
+
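A worked example of the accounting above (illustrative): an object pinned
once through the pin ioctl has pin_count == 1 and user_pin_count == 1, so
1 - !!1 == 0 and it is not counted as a display pin; pin the same object
for scanout as well and pin_count becomes 2, so 2 - !!1 == 1, which is
treated as a remaining display pin.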
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3458,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
}
+ /* Mark the pin_display early so that we account for the
+ * display coherency whilst setting up the cache domains.
+ */
+ obj->pin_display = true;
+
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for all
@@ -3467,19 +3571,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_set_cache_level(obj,
+ HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
- return ret;
+ goto err_unpin_display;
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
if (ret)
- return ret;
+ goto err_unpin_display;
- i915_gem_object_flush_cpu_write_domain(obj);
+ i915_gem_object_flush_cpu_write_domain(obj, true);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3495,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
old_write_domain);
return 0;
+
+err_unpin_display:
+ obj->pin_display = is_pin_display(obj);
+ return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin(obj);
+ obj->pin_display = is_pin_display(obj);
}
int
@@ -3540,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, false);
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
@@ -3618,37 +3734,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
+ struct i915_vma *vma;
int ret;
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (obj->gtt_space != NULL) {
- if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+
+ if (vma) {
+ if ((alignment &&
+ vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- obj->gtt_offset, alignment,
+ i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
}
- if (obj->gtt_space == NULL) {
+ if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable,
- nonblocking);
+ ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
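The i915_gem_obj_ggtt_pin() calls used elsewhere in this series are
presumably thin wrappers over the reworked i915_gem_object_pin(); a
sketch under that assumption:

static inline int
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
		      bool map_and_fenceable, bool nonblocking)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	/* Pin into the one global GTT address space. */
	return i915_gem_object_pin(obj, &dev_priv->gtt.base, alignment,
				   map_and_fenceable, nonblocking);
}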
@@ -3669,7 +3792,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(obj->gtt_space == NULL);
+ BUG_ON(!i915_gem_obj_bound_any(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3707,7 +3830,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
- ret = i915_gem_object_pin(obj, args->alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3715,11 +3838,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++;
obj->pin_filp = file;
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj->gtt_offset;
+ args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3858,10 +3977,11 @@ unlock:
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->obj_exec_link);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
@@ -3926,6 +4046,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
+ trace_i915_gem_object_create(obj);
+
return obj;
}
@@ -3941,6 +4063,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@@ -3948,15 +4071,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
- bool was_interruptible;
+ /* NB: 0 or 1 elements */
+ WARN_ON(!list_empty(&obj->vma_list) &&
+ !list_is_singular(&obj->vma_list));
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ if (WARN_ON(ret == -ERESTARTSYS)) {
+ bool was_interruptible;
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_vma_unbind(vma));
- dev_priv->mm.interruptible = was_interruptible;
+ dev_priv->mm.interruptible = was_interruptible;
+ }
}
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3982,15 +4111,42 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->exec_list);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+
+ return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ list_del(&vma->vma_link);
+ kfree(vma);
+}
+
int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev->struct_mutex);
-
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4006,18 +4162,11 @@ i915_gem_idle(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound mm.suspended!
- */
- dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
-
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
@@ -4150,8 +4299,8 @@ i915_gem_init_hw(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+ if (dev_priv->ellc_size)
+ I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4376,7 @@ int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
if (ret != 0) {
@@ -4247,7 +4396,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4259,7 +4408,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
- dev_priv->mm.suspended = 1;
+ dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4269,11 +4418,26 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
drm_irq_uninstall(dev);
- return i915_gem_idle(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
+ */
+ if (ret != 0)
+ dev_priv->ums.mm_suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
void
@@ -4284,9 +4448,11 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
}
static void
@@ -4296,6 +4462,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
+{
+ vm->dev = dev_priv->dev;
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->global_link);
+ list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
@@ -4308,8 +4484,9 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4608,11 +4785,101 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->active)
+ continue;
+
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
+ }
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.start;
+	}
+ return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
+
+ return false;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_address_space *vm;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ if (i915_gem_obj_bound(o, vm))
+ return true;
+
+ return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+
+ BUG_ON(list_empty(&o->vma_list));
+
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma->node.size;
+
+ return 0;
+}
+
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
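Taken together, these helpers replace the old obj->gtt_space and
obj->gtt_offset fields with per-address-space queries. A hypothetical
caller, for illustration only:

static void example_dump_binding(struct drm_i915_gem_object *obj,
				 struct i915_address_space *vm)
{
	if (i915_gem_obj_bound(obj, vm))
		DRM_DEBUG("bound at 0x%08lx, %lu bytes\n",
			  i915_gem_obj_offset(obj, vm),
			  i915_gem_obj_size(obj, vm));
}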
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171ca..403309c2a7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_LLC_MLC);
+ I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
}
struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *to;
-
- if (dev_priv->hw_contexts_disabled)
- return ERR_PTR(-ENOENT);
-
- if (ring->id != RCS)
- return ERR_PTR(-EINVAL);
-
- if (file == NULL)
- return ERR_PTR(-EINVAL);
+ struct i915_hw_context *ctx;
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
- to = i915_gem_context_get(file->driver_priv, id);
- if (to == NULL)
+ ctx = NULL;
+ if (!dev_priv->hw_contexts_disabled)
+ ctx = i915_gem_context_get(file->driver_priv, id);
+ if (ctx == NULL)
return ERR_PTR(-ENOENT);
- return &to->hang_stats;
+ return &ctx->hang_stats;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, new_context->obj->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
+ struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
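Dropping the ring/file checks lets hang statistics be looked up before a
ring is even selected; a caller sketch, assuming the hang-stats layout of
this era:

	struct i915_ctx_hang_stats *hs;

	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
	if (IS_ERR(hs))
		return PTR_ERR(hs);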
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3da..775d506b320 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed inactive %p\n", obj);
@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
return warned = err;
}
-#endif /* WATCH_INACTIVE */
-
-#if WATCH_COHERENCY
-void
-i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
-{
- struct drm_device *dev = obj->base.dev;
- int page;
- uint32_t *gtt_mapping;
- uint32_t *backing_map = NULL;
- int bad_count = 0;
-
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj->gtt_offset, handle,
- obj->size / 1024);
-
- gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
- obj->base.size);
- if (gtt_mapping == NULL) {
- DRM_ERROR("failed to map GTT space\n");
- return;
- }
-
- for (page = 0; page < obj->size / PAGE_SIZE; page++) {
- int i;
-
- backing_map = kmap_atomic(obj->pages[page]);
-
- if (backing_map == NULL) {
- DRM_ERROR("failed to map backing page\n");
- goto out;
- }
-
- for (i = 0; i < PAGE_SIZE / 4; i++) {
- uint32_t cpuval = backing_map[i];
- uint32_t gttval = readl(gtt_mapping +
- page * 1024 + i);
-
- if (cpuval != gttval) {
- DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
- "0x%08x vs 0x%08x\n",
- (int)(obj->gtt_offset +
- page * PAGE_SIZE + i * 4),
- cpuval, gttval);
- if (bad_count++ >= 8) {
- DRM_INFO("...\n");
- goto out;
- }
- }
- }
- kunmap_atomic(backing_map);
- backing_map = NULL;
- }
-
- out:
- if (backing_map != NULL)
- kunmap_atomic(backing_map);
- iounmap(gtt_mapping);
-
- /* give syslog time to catch up */
- msleep(1);
-
- /* Directly flush the object, since we just loaded values with the CPU
- * from the backing pages and we don't want to disturb the cache
- * management that we're trying to observe.
- */
-
- i915_gem_clflush_object(obj);
-}
-#endif
+#endif /* WATCH_LIST */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9e657833080..e918b05fcbd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
#include "i915_drv.h"
#include <linux/dma-buf.h>
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_intel_bo(buf->priv);
+}
+
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
struct sg_table *st;
struct scatterlist *src, *dst;
int ret, i;
@@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
mutex_lock(&obj->base.dev->struct_mutex);
@@ -98,20 +103,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
mutex_unlock(&obj->base.dev->struct_mutex);
}
-static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct drm_i915_gem_object *obj = dma_buf->priv;
-
- if (obj->base.export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&obj->base);
- }
-}
-
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter;
struct page **pages;
@@ -159,7 +153,7 @@ error:
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
@@ -202,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -219,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
- .release = i915_gem_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
.kmap = i915_gem_dmabuf_kmap,
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
@@ -233,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -272,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
/* is this one of own objects? */
if (dma_buf->ops == &i915_dmabuf_ops) {
- obj = dma_buf->priv;
+ obj = dma_buf_to_obj(dma_buf);
/* is it from our device? */
if (obj->base.dev == dev) {
/*
@@ -297,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- if (ret) {
- i915_gem_object_free(obj);
- goto fail_detach;
- }
-
+ drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
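Storing the GEM object itself (rather than the i915 wrapper) as
dma_buf->priv is what allows the generic drm_gem_dmabuf_release() to drop
the export reference. A sketch of the export/import round trip, for
illustration:

	struct dma_buf *buf;

	buf = i915_gem_prime_export(dev, &obj->base, 0);
	if (!IS_ERR(buf))
		obj = dma_buf_to_obj(buf);	/* back to the i915 object */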
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356f..91b70015585 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,23 +32,23 @@
#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- if (obj->pin_count)
+ if (vma->obj->pin_count)
return false;
- list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(obj->gtt_space);
+ list_add(&vma->exec_list, unwind);
+ return drm_mm_scan_add_block(&vma->node);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
int ret = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
*/
INIT_LIST_HEAD(&unwind_list);
- if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level,
- 0, dev_priv->gtt.mappable_end);
- else
- drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level);
+ if (mappable) {
+ BUG_ON(!i915_is_ggtt(vm));
+ drm_mm_init_scan_with_range(&vm->mm, min_size,
+ alignment, cache_level, 0,
+ dev_priv->gtt.mappable_end);
+ } else
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
goto found;
}
none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
- obj = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
exec_list);
-
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
- list_del_init(&obj->exec_list);
+ list_del_init(&vma->exec_list);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -124,27 +123,30 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
exec_list);
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
- list_move(&obj->exec_list, &eviction_list);
- drm_gem_object_reference(&obj->base);
+ if (drm_mm_scan_remove_block(&vma->node)) {
+ list_move(&vma->exec_list, &eviction_list);
+ drm_gem_object_reference(&vma->obj->base);
continue;
}
- list_del_init(&obj->exec_list);
+ list_del_init(&vma->exec_list);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
+ struct drm_gem_object *obj;
+ vma = list_first_entry(&eviction_list,
+ struct i915_vma,
exec_list);
+
+ obj = &vma->obj->base;
+ list_del_init(&vma->exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(obj);
}
return ret;
@@ -154,12 +156,18 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
- bool lists_empty;
+ struct i915_address_space *vm;
+ struct i915_vma *vma, *next;
+ bool lists_empty = true;
int ret;
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+		if (!lists_empty)
+			break;
+ }
+
if (lists_empty)
return -ENOSPC;
@@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list)
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ if (vma->obj->pin_count == 0)
+ WARN_ON(i915_vma_unbind(vma));
+ }
return 0;
}
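The eviction entry point now takes the address space to scan; the bind
path in i915_gem.c above calls it as:

	ret = i915_gem_evict_something(dev, vm, size, alignment,
				       obj->cache_level,
				       map_and_fenceable, nonblocking);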
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e517..792c52a235e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -172,9 +172,60 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
}
static int
+relocate_entry_cpu(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ char *vaddr;
+	int ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
+static int
+relocate_entry_gtt(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+	int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += i915_gem_obj_ggtt_offset(obj);
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + offset_in_page(reloc->offset));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+
+ return 0;
+}
+
+static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -188,7 +239,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = target_i915_obj->gtt_offset;
+ target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -254,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EFAULT;
reloc->delta += target_offset;
- if (use_cpu_reloc(obj)) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
- char *vaddr;
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- return ret;
-
- vaddr = kmap_atomic(i915_gem_object_get_page(obj,
- reloc->offset >> PAGE_SHIFT));
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
- kunmap_atomic(vaddr);
- } else {
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
- iowrite32(reloc->delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
+ if (use_cpu_reloc(obj))
+ ret = relocate_entry_cpu(obj, reloc);
+ else
+ ret = relocate_entry_gtt(obj, reloc);
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -297,7 +318,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb)
+ struct eb_objects *eb,
+ struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +343,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
+ vm);
if (ret)
return ret;
@@ -344,13 +367,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs)
+ struct drm_i915_gem_relocation_entry *relocs,
+ struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
+ vm);
if (ret)
return ret;
}
@@ -359,7 +384,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -373,7 +399,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@@ -395,6 +421,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
+ struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+ ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ false);
if (ret)
return ret;
@@ -436,8 +464,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != obj->gtt_offset) {
- entry->offset = obj->gtt_offset;
+ if (entry->offset != i915_gem_obj_offset(obj, vm)) {
+ entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@@ -458,7 +486,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@@ -475,6 +503,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
+ struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -529,31 +558,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
+ u32 obj_offset;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_bound(obj, vm))
continue;
+ obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ WARN_ON((need_mappable || need_fence) &&
+ !i915_is_ggtt(vm));
+
+ if ((entry->alignment &&
+ obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
+ if (i915_gem_obj_bound(obj, vm))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@@ -577,7 +612,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@@ -661,14 +697,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset]);
+ reloc + reloc_offset[offset],
+ vm);
if (ret)
goto err;
}
@@ -691,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
+ bool flush_chipset = false;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@@ -699,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
+ flush_chipset |= i915_gem_clflush_object(obj, false);
flush_domains |= obj->base.write_domain;
}
- if (flush_domains & I915_GEM_DOMAIN_CPU)
+ if (flush_chipset)
i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
@@ -758,8 +796,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_multipages_readable(ptr, length))
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ if (fault_in_multipages_readable(ptr, length))
+ return -EFAULT;
+ }
}
return 0;
@@ -767,6 +807,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -781,6 +822,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+ /* FIXME: This lookup gets fixed later <-- danvet */
+ list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
@@ -835,7 +878,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@@ -872,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -880,7 +924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -888,7 +932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -972,7 +1016,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
@@ -997,17 +1041,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb);
+ ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1058,7 +1102,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_start = i915_gem_obj_offset(batch_obj, vm) +
+ args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1083,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
@@ -1104,6 +1149,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1159,7 +1205,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1185,6 +1232,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1215,7 +1263,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
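Both legacy execbuffer ioctls still pass &dev_priv->gtt.base, so the vm
plumbing is in place before any per-context address spaces use it. The
i915_prefault_disable flag checked in validate_exec_list() is presumably
a debug module parameter; a sketch of how such a knob is typically
declared, stated as an assumption:

bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
		 "Disable page prefaulting for pread/pwrite/reloc (default: false)");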
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b..212f6d8c35e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,12 @@
#include "i915_trace.h"
#include "intel_drv.h"
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
@@ -39,19 +43,50 @@
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+/* Cacheability Control is a 4-bit value. The low three bits are stored in
+ * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+
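
As a sanity check on the bit layout, a standalone sketch (not driver code) confirming that the macro places the low three bits of the 4-bit cacheability value into PTE bits 3:1 and the fourth bit into PTE bit 11, so HSW_WB_ELLC_LLC_AGE0 works out to 0x806:

#include <assert.h>

#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))

int main(void)
{
	/* bits 2:0 land in PTE bits 3:1; bit 3 lands in PTE bit 11 */
	assert(HSW_CACHEABILITY_CONTROL(0x2) == 0x004);	/* WB LLC age 3 */
	assert(HSW_CACHEABILITY_CONTROL(0x3) == 0x006);	/* WB LLC age 0 */
	assert(HSW_CACHEABILITY_CONTROL(0xb) == 0x806);	/* WB eLLC+LLC */
	assert(HSW_CACHEABILITY_CONTROL(0x6) == 0x00c);	/* WT eLLC+LLC */
	return 0;
}
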
+static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
- case I915_CACHE_LLC_MLC:
- pte |= GEN6_PTE_CACHE_LLC_MLC;
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return pte;
+}
+
+static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
@@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- BUG();
+ WARN_ON(1);
}
return pte;
@@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
- pte |= GEN6_PTE_CACHE_LLC;
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE0;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE0;
+ break;
+ }
return pte;
}
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
}
/* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = ppgtt->pte_encode(ppgtt->dev,
- ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
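
clear_range, insert_entries and cleanup now take the abstract i915_address_space and recover the enclosing PPGTT with container_of, which only works because the base struct is embedded by value rather than pointed to. A self-contained sketch of that idiom, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
	void (*clear_range)(struct address_space *vm, unsigned n);
};

struct hw_ppgtt {
	struct address_space base;	/* embedded, not a pointer */
	int num_pd_entries;
};

static void ppgtt_clear_range(struct address_space *vm, unsigned n)
{
	struct hw_ppgtt *ppgtt = container_of(vm, struct hw_ppgtt, base);

	printf("clearing %u entries of a %d-PDE PPGTT\n",
	       n, ppgtt->num_pd_entries);
}

int main(void)
{
	struct hw_ppgtt ppgtt = { { ppgtt_clear_range }, 512 };

	/* callers only ever see the base vtable, as in the hunks above */
	ppgtt.base.clear_range(&ppgtt.base, 4);
	return 0;
}
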
-static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
- cache_level);
+ pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
int i;
+ drm_mm_takedown(&ppgtt->base.mm);
+
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->dev->pdev,
+ pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
@@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->dev;
+ struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
@@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
- if (IS_HASWELL(dev)) {
- ppgtt->pte_encode = hsw_pte_encode;
- } else if (IS_VALLEYVIEW(dev)) {
- ppgtt->pte_encode = byt_pte_encode;
- } else {
- ppgtt->pte_encode = gen6_pte_encode;
- }
- ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
- ppgtt->clear_range = gen6_ppgtt_clear_range;
- ppgtt->insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
@@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return -ENOMEM;
- ppgtt->dev = dev;
- ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+ ppgtt->base.dev = dev;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (ret)
kfree(ppgtt);
- else
+ else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
+ drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+ ppgtt->base.total);
+ }
return ret;
}
@@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return;
- ppgtt->cleanup(ppgtt);
+ ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
}
@@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- ppgtt->insert_entries(ppgtt, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- ppgtt->clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->base.clear_range(&ppgtt->base,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
@@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
- dev_priv->gtt.total / PAGE_SIZE);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, obj->pin_display);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_insert_entries(struct drm_device *dev,
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
- &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++;
}
@@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1])
- != dev_priv->gtt.pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1]) !=
+ vm->pte_encode(addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
-static void gen6_ggtt_clear_range(struct drm_device *dev,
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = dev_priv->gtt.pte_encode(dev,
- dev_priv->gtt.scratch_page_dma,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
-static void i915_ggtt_insert_entries(struct drm_device *dev,
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
@@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
}
-static void i915_ggtt_clear_range(struct drm_device *dev,
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
@@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
+ entry,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ entry,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
- dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+ dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
- obj->gtt_offset, obj->base.size);
-
- BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_offset,
- obj->base.size,
- false);
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ int ret;
+ DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+ i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+ WARN_ON(i915_gem_obj_ggtt_bound(obj));
+ ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+ if (ret)
+ DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
+ list_add(&vma->vma_link, &obj->vma_list);
}
- dev_priv->gtt.start = start;
- dev_priv->gtt.total = end - start;
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
- hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+ const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
- (hole_end-hole_start) / PAGE_SIZE);
+ ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
/* And finally clear the reserved guard page */
- dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+ ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
static bool
@@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
- gtt_size = dev_priv->gtt.total;
+ gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
return;
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
- gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
+ gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->gtt.scratch_page = page;
- dev_priv->gtt.scratch_page_dma = dma_addr;
+ dev_priv->gtt.base.scratch.page = page;
+ dev_priv->gtt.base.scratch.addr = dma_addr;
return 0;
}
@@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->gtt.scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+ struct page *page = dev_priv->gtt.base.scratch.page;
+
+ set_pages_wb(page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->gtt.scratch_page);
- __free_page(dev_priv->gtt.scratch_page);
+ put_page(page);
+ __free_page(page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
- dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
return ret;
}
-static void gen6_gmch_remove(struct drm_device *dev)
+static void gen6_gmch_remove(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->gtt.gsm);
- teardown_scratch_page(dev_priv->dev);
+ struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+ iounmap(gtt->gsm);
+ teardown_scratch_page(vm->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
return 0;
}
-static void i915_gmch_remove(struct drm_device *dev)
+static void i915_gmch_remove(struct i915_address_space *vm)
{
intel_gmch_remove();
}
@@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- dev_priv->gtt.gtt_probe = i915_gmch_probe;
- dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ gtt->gtt_probe = i915_gmch_probe;
+ gtt->base.cleanup = i915_gmch_remove;
} else {
- dev_priv->gtt.gtt_probe = gen6_gmch_probe;
- dev_priv->gtt.gtt_remove = gen6_gmch_remove;
- if (IS_HASWELL(dev)) {
- dev_priv->gtt.pte_encode = hsw_pte_encode;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->gtt.pte_encode = byt_pte_encode;
- } else {
- dev_priv->gtt.pte_encode = gen6_pte_encode;
- }
+ gtt->gtt_probe = gen6_gmch_probe;
+ gtt->base.cleanup = gen6_gmch_remove;
+ if (IS_HASWELL(dev) && dev_priv->ellc_size)
+ gtt->base.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(dev))
+ gtt->base.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(dev))
+ gtt->base.pte_encode = byt_pte_encode;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ gtt->base.pte_encode = ivb_pte_encode;
+ else
+ gtt->base.pte_encode = snb_pte_encode;
}
- ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
- &dev_priv->gtt.stolen_size,
- &gtt->mappable_base,
- &gtt->mappable_end);
+ ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+ &gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
+ gtt->base.dev = dev;
+
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
- dev_priv->gtt.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
- dev_priv->gtt.mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
- dev_priv->gtt.stolen_size >> 20);
+ gtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
return 0;
}
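
The i915_gem_gtt_init hunk above now picks exactly one pte_encode implementation per platform at probe time, and every later PTE write goes through that function pointer. A sketch of the selection step, assuming hypothetical capability flags:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t (*pte_encode_fn)(uint64_t addr, int cache_level);

static uint32_t snb_encode(uint64_t a, int l) { return (uint32_t)a | 1; }
static uint32_t ivb_encode(uint64_t a, int l) { return (uint32_t)a | 1; }
static uint32_t hsw_encode(uint64_t a, int l) { return (uint32_t)a | 1; }

struct caps {			/* hypothetical probe results */
	int gen;
	int is_haswell;
};

static pte_encode_fn pick_pte_encode(const struct caps *c)
{
	if (c->is_haswell)
		return hsw_encode;
	if (c->gen >= 7)
		return ivb_encode;
	return snb_encode;
}

int main(void)
{
	struct caps c = { .gen = 7, .is_haswell = 0 };
	pte_encode_fn encode = pick_pte_encode(&c);

	printf("pte = %#x\n", encode(0x1000, 0));
	return 0;
}
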
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cec..9969d10b80f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -45,49 +45,48 @@
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
+ struct resource *r;
u32 base;
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so on those compute the base by subtracting the
- * stolen memory from the Top of Low Usable DRAM which is where the
- * BIOS places the graphics stolen memory.
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at offset 0x5c in the igfx configuration space. On a few (desktop)
+ * machines this is also mirrored in the bridge device at different
+ * locations, or in the MCHBAR. On gen2, the layout is again slightly
+ * different with the Graphics Segment immediately following Top of
+ * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+ * reported by 865g, so we just use the top of memory as determined
+ * by the e820 probe.
*
- * On gen2, the layout is slightly different with the Graphics Segment
- * immediately following Top of Memory (or Top of Usable DRAM). Note
- * it appears that TOUD is only reported by 865g, so we just use the
- * top of memory as determined by the e820 probe.
- *
- * XXX gen2 requires an unavailable symbol and 945gm fails with
- * its value of TOLUD.
+ * XXX However gen2 requires an unavailable symbol.
*/
base = 0;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 3) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- /* Read Base Data of Stolen Memory Register (BDSM) directly.
- * Note that there is also a MCHBAR miror at 0x1080c0 or
- * we could use device 2:0x5c instead.
- */
- pci_read_config_dword(pdev, 0xB0, &base);
- base &= ~4095; /* lower bits used for locking register */
- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* Read Graphics Base of Stolen Memory directly */
- pci_read_config_dword(pdev, 0xA4, &base);
+ } else { /* GEN2 */
#if 0
- } else if (IS_GEN3(dev)) {
- u8 val;
- /* Stolen is immediately below Top of Low Usable DRAM */
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- base -= dev_priv->mm.gtt->stolen_size;
- } else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
+ if (base == 0)
+ return 0;
+
+ /* Verify that nothing else uses this physical address. Stolen
+ * memory should be reserved by the BIOS and hidden from the
+ * kernel. So if the region is already marked as busy, something
+ * is seriously wrong.
+ */
+ r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
+
return base;
}
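
The rewritten probe reads the Graphics Base of Stolen Memory from config offset 0x5c and masks it down to a 1 MiB boundary before asking the resource tree whether anything else already claims the range. The masking step in isolation, as a quick standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t raw = 0x7bf00042;			/* example register value */
	uint32_t base = raw & ~((1u << 20) - 1);	/* 1 MiB aligned */

	assert(base == 0x7bf00000);
	assert((base & 0xfffff) == 0);	/* low 20 bits cleared */
	return 0;
}
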
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int ret;
- /* Try to over-allocate to reduce reallocations and fragmentation */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size <<= 1, 4096, 0);
+ compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size >>= 1, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
+ goto err_llb;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_llb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
- dev_priv->compressed_llb = compressed_llb;
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+ 4096, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->compressed_fb = compressed_fb;
- dev_priv->cfb_size = size;
+ dev_priv->fbc.compressed_fb = compressed_fb;
+ dev_priv->fbc.size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
return 0;
err_fb:
- drm_mm_put_block(compressed_fb);
-err:
+ kfree(compressed_llb);
+ drm_mm_remove_node(compressed_fb);
+err_llb:
+ kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
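
i915_setup_compression first over-allocates (size <<= 1) to give the compressed framebuffer room to grow, then falls back to the requested size if the generous request fails. The retry shape, abstracted from drm_mm behind a hypothetical alloc_range helper:

#include <stdio.h>

/* Hypothetical allocator: fails for requests larger than the pool. */
static int alloc_range(unsigned long pool, unsigned long size)
{
	return size <= pool ? 0 : -1;
}

int main(void)
{
	unsigned long pool = 12 << 20;	/* 12 MiB of stolen space */
	unsigned long size = 8 << 20;	/* caller wants 8 MiB */
	int ret;

	/* Try to over-allocate to reduce reallocations and fragmentation */
	ret = alloc_range(pool, size <<= 1);		/* 16 MiB: fails */
	if (ret)
		ret = alloc_range(pool, size >>= 1);	/* back to 8 MiB */

	printf("got %lu MiB? %s\n", size >> 20, ret ? "no" : "yes");
	return 0;
}
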
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->cfb_size)
+ if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->cfb_size == 0)
+ if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->compressed_fb)
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->fbc.compressed_fb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+ kfree(dev_priv->fbc.compressed_fb);
+ }
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
+ if (dev_priv->fbc.compressed_llb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
- dev_priv->cfb_size = 0;
+ dev_priv->fbc.size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+ if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ return 0;
+
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj == NULL)
return NULL;
- if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
- goto cleanup;
-
+ drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
@@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->cache_level = I915_CACHE_NONE;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
@@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (size == 0)
return NULL;
- stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (stolen)
- stolen = drm_mm_get_block(stolen, size, 4096);
- if (stolen == NULL)
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+ 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ kfree(stolen);
return NULL;
+ }
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
@@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (WARN_ON(size == 0))
return NULL;
- stolen = drm_mm_create_block(&dev_priv->mm.stolen,
- stolen_offset, size,
- false);
- if (stolen == NULL) {
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ stolen->start = stolen_offset;
+ stolen->size = size;
+ ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
/* Some objects just need physical mem from stolen space */
- if (gtt_offset == -1)
+ if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ vma = i915_gem_vma_create(obj, ggtt);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- gtt_offset, size,
- false);
- if (obj->gtt_space == NULL) {
+ vma->node.start = gtt_offset;
+ vma->node.size = size;
+ if (drm_mm_initialized(&ggtt->mm)) {
+ ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- drm_gem_object_unreference(&obj->base);
- return NULL;
+ goto err_vma;
}
- } else
- obj->gtt_space = I915_GTT_RESERVED;
+ }
- obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&vma->mm_list, &ggtt->inactive_list);
return obj;
+
+err_vma:
+ i915_gem_vma_destroy(vma);
+err_out:
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
}
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
- drm_mm_put_block(obj->stolen);
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
obj->stolen = NULL;
}
}
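
For BIOS-preallocated objects the code above now fills in node.start and node.size and asks drm_mm_reserve_node to claim exactly that range, failing if it overlaps an existing node, rather than searching for free space. A toy model of reserve-at-fixed-offset, with hypothetical names:

#include <stdio.h>

struct node { unsigned long start, size; int used; };

/* Hypothetical mini-allocator with a single existing reservation. */
static int reserve_node(struct node *existing, struct node *want)
{
	unsigned long a0 = existing->start, a1 = a0 + existing->size;
	unsigned long b0 = want->start, b1 = b0 + want->size;

	if (existing->used && a0 < b1 && b0 < a1)
		return -1;	/* overlap: range already claimed */
	want->used = 1;
	return 0;
}

int main(void)
{
	struct node taken = { 0x0000, 0x4000, 1 };
	struct node vma   = { 0x8000, 0x2000, 0 };	/* gtt_offset, size */

	printf("reserve at %#lx: %s\n", vma.start,
	       reserve_node(&taken, &vma) ? "failed" : "ok");
	return 0;
}
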
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69d..032e9ef9c89 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (obj->gtt_space->size != size)
+ if (i915_gem_obj_ggtt_size(obj) != size)
return false;
- if (obj->gtt_offset & (size - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
obj->map_and_fenceable =
- obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) +
+ obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
- u32 unfenced_alignment =
+ u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (obj->gtt_offset & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
}
if (ret == 0) {
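
The fence checks above rely on fence regions being power-of-two sized, so offset & (size - 1) tests alignment (and hence fenceability) with a single mask. In isolation:

#include <assert.h>

static int aligned_to(unsigned long offset, unsigned long size)
{
	/* valid only when size is a power of two, as GTT fence sizes are */
	return (offset & (size - 1)) == 0;
}

int main(void)
{
	assert(aligned_to(0x100000, 0x100000));		/* 1 MiB at 1 MiB */
	assert(!aligned_to(0x180000, 0x100000));	/* misaligned */
	return 0;
}
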
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 00000000000..558e568d5b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
+ case VECS: return "vebox";
+ default: return "";
+ }
+}
+
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+ if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+ e->err = -ENOSPC;
+ return false;
+ }
+
+ if (e->bytes == e->size - 1 || e->err)
+ return false;
+
+ return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ if (e->pos + len <= e->start) {
+ e->pos += len;
+ return false;
+ }
+
+ /* First vsnprintf needs to fit in its entirety for memmove */
+ if (len >= e->size) {
+ e->err = -EIO;
+ return false;
+ }
+
+ return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+	/* If this is the first printf in this window, adjust it so that
+	 * the start position matches the start of the buffer
+ */
+
+ if (e->pos < e->start) {
+ const size_t off = e->start - e->pos;
+
+ /* Should not happen but be paranoid */
+ if (off > len || e->bytes) {
+ e->err = -EIO;
+ return;
+ }
+
+ memmove(e->buf, e->buf + off, len - off);
+ e->bytes = len - off;
+ e->pos = e->start;
+ return;
+ }
+
+ e->bytes += len;
+ e->pos += len;
+}
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+ const char *f, va_list args)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+	/* Seek to the first printf which hits the start position */
+ if (e->pos < e->start) {
+ len = vsnprintf(NULL, 0, f, args);
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+
+ __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ len = strlen(str);
+
+	/* Seek to the first printf which hits the start position */
+ if (e->pos < e->start) {
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+ memcpy(e->buf + e->bytes, str, len);
+
+ __i915_error_advance(e, len);
+}
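
The seek/advance helpers let the printer re-render the whole error report on every read while copying out only the bytes that fall inside the [start, start + size) window: output entirely before the window is skipped, and the first string straddling the boundary is trimmed into place. A compact standalone model of that windowing (the real code formats with vsnprintf and shifts with memmove; this sketch trims strings directly):

#include <stdio.h>
#include <string.h>

struct win { char buf[32]; size_t size, start, pos, bytes; };

static void emit(struct win *e, const char *s)
{
	size_t len = strlen(s);

	if (e->pos + len <= e->start) {		/* entirely before window */
		e->pos += len;
		return;
	}
	if (e->pos < e->start) {		/* straddles window start */
		size_t off = e->start - e->pos;

		len -= off;
		s += off;
		e->pos = e->start;
	}
	if (len > e->size - e->bytes - 1)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, s, len);
	e->bytes += len;
	e->pos += len;
}

int main(void)
{
	struct win e = { .size = sizeof(e.buf), .start = 6 };

	emit(&e, "HEAD: 0x0\n");	/* 10 chars; the first 6 are skipped */
	emit(&e, "TAIL: 0x8\n");
	e.buf[e.bytes] = '\0';
	printf("%s", e.buf);		/* prints "0x0\nTAIL: 0x8\n" */
	return 0;
}
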
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ err_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ err_printf(m, " %08x %8u %02x %02x %x %x",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->rseqno, err->wseqno);
+ err_puts(m, pin_flag(err->pinned));
+ err_puts(m, tiling_flag(err->tiling));
+ err_puts(m, dirty_flag(err->dirty));
+ err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->ring != -1 ? " " : "");
+ err_puts(m, ring_str(err->ring));
+ err_puts(m, i915_cache_level_str(err->cache_level));
+
+ if (err->name)
+ err_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ err_printf(m, " (fence: %d)", err->fence_reg);
+
+ err_puts(m, "\n");
+ err++;
+ }
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+ err_printf(m, "%s command stream:\n", ring_str(ring));
+ err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
+ err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+ err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
+ if (HAS_VEBOX(dev)) {
+ err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][2],
+ error->semaphore_seqno[ring][2]);
+ }
+ }
+ err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
+ err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+ va_list args;
+
+ va_start(args, f);
+ i915_error_vprintf(e, f, args);
+ va_end(args);
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ const struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_device *dev = error_priv->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
+ int i, j, page, offset, elt;
+
+ if (!error) {
+ err_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+ err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ err_printf(m, "EIR: 0x%08x\n", error->eir);
+ err_printf(m, "IER: 0x%08x\n", error->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+ err_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
+ error->extra_instdone[i]);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, "ERROR: 0x%08x\n", error->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo[0],
+ error->active_bo_count[0]);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo[0],
+ error->pinned_bo_count[0]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+
+ if ((obj = error->ring[i].batchbuffer)) {
+ err_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ if (error->ring[i].num_requests) {
+ err_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
+ err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ obj = error->ring[i].ctx;
+ if (obj) {
+ err_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
+ }
+
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
+out:
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+ size_t count, loff_t pos)
+{
+ memset(ebuf, 0, sizeof(*ebuf));
+
+ /* We need to have enough room to store any i915_error_state printf
+	 * so that we can move it to the start position.
+ */
+ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size,
+ GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = 128;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL)
+ return -ENOMEM;
+
+ ebuf->start = pos;
+
+ return 0;
+}
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].ctx);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error->display);
+ kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ const int num_pages)
+{
+ struct drm_i915_error_object *dst;
+ int i;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+ for (i = 0; i < num_pages; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = num_pages;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), \
+ (src)->base.size>>PAGE_SHIFT)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct i915_vma *vma;
+ int i = 0;
+
+ list_for_each_entry(vma, head, mm_list) {
+ capture_bo(err++, vma->obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, global_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
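+		/* no break: gen3 falls through and records fences 0-7 too */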
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ obj = vma->obj;
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
+ }
+
+ return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ }
+
+ if (HAS_VEBOX(dev)) {
+ error->semaphore_mboxes[ring->id][2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS)
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+	/* Currently the render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+ ering->ctx = i915_error_object_create_sized(dev_priv,
+ obj, 1);
+ break;
+ }
+ }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for_each_ring(ring, dev_priv, i) {
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct i915_address_space *vm,
+ const int ndx)
+{
+ struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i;
+
+ i = 0;
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ i++;
+ error->active_bo_count[ndx] = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (obj->pin_count)
+ i++;
+ error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+ if (i) {
+ active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+ if (active_bo)
+ pinned_bo = active_bo + error->active_bo_count[ndx];
+ }
+
+ if (active_bo)
+ error->active_bo_count[ndx] =
+ capture_active_bo(active_bo,
+ error->active_bo_count[ndx],
+ &vm->active_list);
+
+ if (pinned_bo)
+ error->pinned_bo_count[ndx] =
+ capture_pinned_bo(pinned_bo,
+ error->pinned_bo_count[ndx],
+ &dev_priv->mm.bound_list);
+ error->active_bo[ndx] = active_bo;
+ error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm;
+ int cnt = 0, i = 0;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ cnt++;
+
+ if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
+ cnt = 1;
+
+ vm = &dev_priv->gtt.base;
+
+ error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+ error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+ error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+ GFP_ATOMIC);
+ error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+ GFP_ATOMIC);
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_capture_vm(dev_priv, error, vm, i++);
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+ int pipe;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ if (error)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in "
+ "/sys/class/drm/card%d/error\n", dev->primary->index);
+
+ kref_init(&error->ref);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
+ if (!HAS_PCH_SPLIT(dev))
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
+ i915_gem_capture_buffers(dev_priv, error);
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ i915_error_state_free(&error->ref);
+}
+
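Editor's note: capture itself runs without the lock; only the publish step takes gpu_error.lock, and if another CPU won the race to fill first_error, the freshly built record is simply freed. A compressed sketch of that publish-or-discard step (names are illustrative, and a pthread mutex stands in for the spinlock):

    #include <pthread.h>
    #include <stdlib.h>

    struct error_state { int eir; };

    static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct error_state *first_error;

    /* Build the record outside the lock, then publish it only if the
     * slot is still empty; the loser of the race frees its copy. */
    static void publish_error(struct error_state *error)
    {
        pthread_mutex_lock(&error_lock);
        if (!first_error) {
            first_error = error;
            error = NULL;
        }
        pthread_mutex_unlock(&error_lock);

        free(error); /* free(NULL) is a no-op */
    }
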
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+
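Editor's note: the get/put/destroy trio gives readers a reference-counted view of first_error: debugfs readers take a reference under the lock, and whoever drops the last reference (a reader or i915_destroy_error_state) frees the record. A minimal sketch of the same lifetime rule, assuming a plain C11 atomic counter in place of struct kref:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct record {
        atomic_int refs;    /* starts at 1 from the capture side */
        /* ... captured state ... */
    };

    static void record_get(struct record *r)
    {
        atomic_fetch_add(&r->refs, 1);
    }

    /* The last holder, reader or destroyer, performs the free. */
    static void record_put(struct record *r)
    {
        if (atomic_fetch_sub(&r->refs, 1) == 1)
            free(r);
    }
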
+const char *i915_cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped or LLC";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ default: return "";
+ }
+}
+
+/* NB: please notice the memset */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
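Editor's note: the switch above intentionally falls through from the default case (warn once on an unknown platform) into the gen-7 case, so unrecognized future parts still get the richest capture available. A minimal reproduction of that dispatch pattern (register values are placeholders):

    #include <stdio.h>

    /* Unknown generations warn, then deliberately fall through to the
     * newest supported read-out. */
    static int read_instdone(int gen, unsigned regs[4])
    {
        int n = 0;

        switch (gen) {
        case 2:
        case 3:
            regs[n++] = 0x11;   /* stand-in for INSTDONE */
            break;
        case 4:
        case 5:
        case 6:
            regs[n++] = 0x22;   /* INSTDONE_I965 */
            regs[n++] = 0x33;   /* INSTDONE1 */
            break;
        default:
            fprintf(stderr, "unsupported gen %d, using gen7 layout\n", gen);
            /* fall through */
        case 7:
            regs[n++] = 0x44;   /* GEN7_INSTDONE_1 and friends */
            regs[n++] = 0x55;
            regs[n++] = 0x66;
            regs[n++] = 0x77;
            break;
        }
        return n;
    }
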
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef15..a03b445ceb5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.deimr &= ~mask;
+ return;
+ }
+
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.deimr |= mask;
+ return;
+ }
+
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ dev_priv->gt_irq_mask &= ~interrupt_mask;
+ dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
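Editor's note: ilk_update_gt_irq (and snb_update_pm_irq and ibx_display_interrupt_update below) share one masking identity: clear the bits named by interrupt_mask, then set those of them that are absent from enabled_irq_mask, so IMR bits end up 1 (masked) for disabled interrupts and 0 for enabled ones. Passing (mask, mask) enables and (mask, 0) disables. A standalone check of that bit math:

    #include <assert.h>
    #include <stdint.h>

    /* IMR semantics: bit set = interrupt masked off. */
    static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
                               uint32_t enabled_irq_mask)
    {
        imr &= ~interrupt_mask;
        imr |= ~enabled_irq_mask & interrupt_mask;
        return imr;
    }

    int main(void)
    {
        uint32_t imr = 0xffffffff;           /* everything masked   */
        imr = update_imr(imr, 0x3, 0x3);     /* enable bits 0 and 1 */
        assert(imr == 0xfffffffc);
        imr = update_imr(imr, 0x1, 0x0);     /* disable bit 0 again */
        assert(imr == 0xfffffffd);
        return 0;
    }
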
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ POSTING_READ(GEN6_PMIMR);
+ }
+}
+
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ snb_update_pm_irq(dev_priv, mask, 0);
+}
+
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -128,6 +219,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ assert_spin_locked(&dev_priv->irq_lock);
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -152,38 +245,75 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- bool enable)
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
if (!ivb_can_enable_err_int(dev))
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
- ERR_INT_FIFO_UNDERRUN_B |
- ERR_INT_FIFO_UNDERRUN_C);
-
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
+ bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+ /* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (!was_enabled &&
+ (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+ DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
}
}
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t sdeimr = I915_READ(SDEIMR);
+ sdeimr &= ~interrupt_mask;
+ sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled &&
+ (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ I915_WRITE(SDEIMR, sdeimr);
+ POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
bool enable)
{
- struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
- SDE_TRANSB_FIFO_UNDER;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+ ibx_enable_display_interrupt(dev_priv, bit);
else
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
- POSTING_READ(SDEIMR);
+ ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +323,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
if (!cpt_can_enable_serr_int(dev))
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
- SERR_INT_TRANS_B_FIFO_UNDERRUN |
- SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
- }
+ uint32_t tmp = I915_READ(SERR_INT);
+ bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
- POSTING_READ(SDEIMR);
+ /* Change the state _after_ we've read out the current one. */
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (!was_enabled &&
+ (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+ DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
}
/**
@@ -243,7 +380,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, enable);
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +406,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe p;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
- if (HAS_PCH_LPT(dev)) {
- crtc = NULL;
- for_each_pipe(p) {
- struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
- if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
- crtc = c;
- break;
- }
- }
- if (!crtc) {
- DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
- return false;
- }
- } else {
- crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- }
- intel_crtc = to_intel_crtc(crtc);
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -303,7 +430,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+ ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
@@ -319,6 +446,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == mask)
return;
@@ -334,6 +463,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == 0)
return;
@@ -625,14 +756,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
- unsigned long flags;
- spin_lock_irqsave(&mchdev_lock, flags);
+ spin_lock(&mchdev_lock);
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -660,7 +790,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
- spin_unlock_irqrestore(&mchdev_lock, flags);
+ spin_unlock(&mchdev_lock);
return;
}
@@ -668,34 +798,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ring->obj == NULL)
return;
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ i915_queue_hangcheck(dev);
}
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
- u32 pm_iir, pm_imr;
+ u32 pm_iir;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- pm_imr = I915_READ(GEN6_PMIMR);
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
- I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
+ snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
return;
@@ -781,13 +908,12 @@ static void ivybridge_parity_work(struct work_struct *work)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
- parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +930,31 @@ static void ivybridge_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long flags;
if (!HAS_L3_GPU_CACHE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_lock(&dev_priv->irq_lock);
+ ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
@@ -841,32 +976,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
}
if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- ivybridge_handle_parity_error(dev);
-}
-
-/* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
- u32 pm_iir)
-{
- unsigned long flags;
-
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->rps.pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by dev_priv->rps.work.
- */
-
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- dev_priv->rps.pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ ivybridge_parity_error_irq_handler(dev);
}
#define HPD_STORM_DETECT_PERIOD 1000
@@ -886,6 +996,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ WARN(((hpd[i] & hotplug_trigger) &&
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+ "Received HPD interrupt although disabled\n");
+
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
@@ -896,6 +1010,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
dev_priv->hpd_stats[i].hpd_cnt = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +1018,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
storm_detected = true;
} else {
dev_priv->hpd_stats[i].hpd_cnt++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+ dev_priv->hpd_stats[i].hpd_cnt);
}
}
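Editor's note: the storm detector above keeps a timestamp and a counter per pin: an interrupt arriving more than HPD_STORM_DETECT_PERIOD after the last one restarts the window, and too many interrupts inside one window mark the pin disabled. A userspace sketch of that windowed counter (the period matches the #define above; the threshold value here is illustrative):

    #include <stdbool.h>

    #define STORM_DETECT_PERIOD_MS 1000
    #define STORM_THRESHOLD 5   /* illustrative stand-in */

    struct hpd_pin { long last_ms; int cnt; bool disabled; };

    /* Returns true when the pin has just been declared stormy. */
    static bool hpd_storm_detect(struct hpd_pin *pin, long now_ms)
    {
        if (now_ms - pin->last_ms > STORM_DETECT_PERIOD_MS) {
            pin->last_ms = now_ms;
            pin->cnt = 0;           /* quiet long enough: new window */
        } else if (pin->cnt > STORM_THRESHOLD) {
            pin->disabled = true;   /* storm: stop trusting this pin */
            return true;
        } else {
            pin->cnt++;
        }
        return false;
    }
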
@@ -928,28 +1045,21 @@ static void dp_aux_irq_handler(struct drm_device *dev)
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
- u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- unsigned long flags;
+ if (pm_iir & GEN6_PM_RPS_EVENTS) {
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+ snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
+ spin_unlock(&dev_priv->irq_lock);
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
- if (dev_priv->rps.pm_iir) {
- I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
- /* never want to mask useful interrupts. (also posting read) */
- WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- /* TODO: if queue_work is slow, move it out of the spinlock */
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+ if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
@@ -1028,8 +1138,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
- if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
+ if (pm_iir)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,27 +1289,112 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
cpt_serr_int_handler(dev);
}
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE)
+ intel_opregion_asle_intr(dev);
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ if (de_iir & DE_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+ DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+ if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+ DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev);
+}
+
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (de_iir & DE_ERR_INT_IVB)
+ ivb_err_int_handler(dev);
+
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_asle_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ }
+
+ /* check event from PCH */
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+}
+
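Editor's note: ivb_display_irq_handler exploits the IVB display-engine register layout, where each pipe's vblank and flip-done bits sit five positions apart, so one loop with a << (5 * pipe) shift covers all three pipes. A trivial demonstration of that stride (the base bit position is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define DE_PIPEA_VBLANK_IVB (1u << 0)   /* illustrative position */

    static uint32_t de_pipe_vblank_ivb(int pipe)
    {
        return DE_PIPEA_VBLANK_IVB << (5 * pipe);
    }

    int main(void)
    {
        assert(de_pipe_vblank_ivb(0) == 0x001);
        assert(de_pipe_vblank_ivb(1) == 0x020);
        assert(de_pipe_vblank_ivb(2) == 0x400);
        return 0;
    }
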
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
+ u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- int i;
+ bool err_int_reenable = false;
atomic_inc(&dev_priv->irq_received);
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
- if (IS_HASWELL(dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed register before interrupt\n");
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
+ intel_uncore_check_errors(dev);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
 * interrupts will be stored on its back queue, and then we'll be
@@ -1217,62 +1412,42 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
+ if (err_int_reenable)
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (INTEL_INFO(dev)->gen >= 6)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
- if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
-
- if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
-
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
- if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
- intel_prepare_page_flip(dev, i);
- intel_finish_page_flip_plane(dev, i);
- }
- }
-
- /* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
- /* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
+ if (INTEL_INFO(dev)->gen >= 7)
+ ivb_display_irq_handler(dev, de_iir);
+ else
+ ilk_display_irq_handler(dev, de_iir);
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
- pm_iir = I915_READ(GEN6_PMIIR);
- if (pm_iir) {
- if (IS_HASWELL(dev))
- hsw_pm_irq_handler(dev_priv, pm_iir);
- else if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- ret = IRQ_HANDLED;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
+ }
}
- if (IS_HASWELL(dev)) {
+ if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1289,119 +1464,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
return ret;
}
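Editor's note: the unified handler follows the same bracket as the two it replaces: mask DE_MASTER_IRQ_CONTROL and zero SDEIER up front so no new interrupt fires while the IIR registers are read and acked, then restore both on the way out so anything queued in SDEIIR retriggers. A toy model of that disable/drain/restore shape (plain variables stand in for the MMIO registers):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy register file standing in for DEIER/SDEIER/DEIIR. */
    static uint32_t deier = 0x80000003, sdeier = 0xffffffff, deiir = 0x5;
    #define DE_MASTER_IRQ_CONTROL 0x80000000u

    static void toy_irq_handler(void)
    {
        uint32_t de_ier = deier, sde_ier = sdeier;

        /* 1. Gate new interrupts before touching any IIR register. */
        deier = de_ier & ~DE_MASTER_IRQ_CONTROL;
        sdeier = 0;

        /* 2. Read and ack pending sources while gated. */
        uint32_t pending = deiir;
        deiir = 0;
        printf("handled de_iir bits 0x%x\n", (unsigned)pending);

        /* 3. Restore; a still-pending SDEIIR bit re-fires here. */
        deier = de_ier;
        sdeier = sde_ier;
    }

    int main(void) { toy_irq_handler(); return 0; }
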
-static void ilk_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
- atomic_inc(&dev_priv->irq_received);
-
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
-
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (IS_GEN5(dev))
- ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- else
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
-
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
- if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
-
- if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
- if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
- if (de_iir & DE_PLANEA_FLIP_DONE) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (de_iir & DE_PLANEB_FLIP_DONE) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
-
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
- else
- ibx_irq_handler(dev, pch_iir);
-
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_handle_rps_change(dev);
-
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
- I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
-
- return ret;
-}
-
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -1417,9 +1479,9 @@ static void i915_error_work_func(struct work_struct *work)
gpu_error);
struct drm_device *dev = dev_priv->dev;
struct intel_ring_buffer *ring;
- char *error_event[] = { "ERROR=1", NULL };
- char *reset_event[] = { "RESET=1", NULL };
- char *reset_done_event[] = { "ERROR=0", NULL };
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1470,535 +1532,6 @@ static void i915_error_work_func(struct work_struct *work)
}
}
-/* NB: please notice the memset */
-static void i915_get_extra_instdone(struct drm_device *dev,
- uint32_t *instdone)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- switch(INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
- instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
- instdone[0] = I915_READ(INSTDONE_I965);
- instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- instdone[0] = I915_READ(GEN7_INSTDONE_1);
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src,
- const int num_pages)
-{
- struct drm_i915_error_object *dst;
- int i;
- u32 reloc_offset;
-
- if (src == NULL || src->pages == NULL)
- return NULL;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
- if (dst == NULL)
- return NULL;
-
- reloc_offset = src->gtt_offset;
- for (i = 0; i < num_pages; i++) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (reloc_offset < dev_priv->gtt.mappable_end &&
- src->has_global_gtt_mapping) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else if (src->stolen) {
- unsigned long offset;
-
- offset = dev_priv->mm.stolen_base;
- offset += src->stolen->start;
- offset += i << PAGE_SHIFT;
-
- memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
-
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
-
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
-
- dst->pages[i] = d;
-
- reloc_offset += PAGE_SIZE;
- }
- dst->page_count = num_pages;
- dst->gtt_offset = src->gtt_offset;
-
- return dst;
-
-unwind:
- while (i--)
- kfree(dst->pages[i]);
- kfree(dst);
- return NULL;
-}
-#define i915_error_object_create(dev_priv, src) \
- i915_error_object_create_sized((dev_priv), (src), \
- (src)->base.size>>PAGE_SHIFT)
-
-static void
-i915_error_object_free(struct drm_i915_error_object *obj)
-{
- int page;
-
- if (obj == NULL)
- return;
-
- for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
-
- kfree(obj);
-}
-
-void
-i915_error_state_free(struct kref *error_ref)
-{
- struct drm_i915_error_state *error = container_of(error_ref,
- typeof(*error), ref);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- i915_error_object_free(error->ring[i].batchbuffer);
- i915_error_object_free(error->ring[i].ringbuffer);
- i915_error_object_free(error->ring[i].ctx);
- kfree(error->ring[i].requests);
- }
-
- kfree(error->active_bo);
- kfree(error->overlay);
- kfree(error->display);
- kfree(error);
-}
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct drm_i915_gem_object *obj)
-{
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, mm_list) {
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, global_list) {
- if (obj->pin_count == 0)
- continue;
-
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static void i915_gem_record_fences(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_gem_object *obj;
- u32 seqno;
-
- if (!ring->get_seqno)
- return NULL;
-
- if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
- u32 acthd = I915_READ(ACTHD);
-
- if (WARN_ON(ring->id != RCS))
- return NULL;
-
- obj = ring->private;
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
- return i915_error_object_create(dev_priv, obj);
- }
-
- seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->ring != ring)
- continue;
-
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj);
- }
-
- return NULL;
-}
-
-static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
- error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS)
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
- }
-
- error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring, false);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
- error->ctl[ring->id] = I915_READ_CTL(ring);
-
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
-}
-
-
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
- struct drm_i915_error_state *error,
- struct drm_i915_error_ring *ering)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_object *obj;
-
- /* Currently render ring is the only HW context user */
- if (ring->id != RCS || !error->ccid)
- return;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
- ering->ctx = i915_error_object_create_sized(dev_priv,
- obj, 1);
- }
- }
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct drm_i915_gem_request *request;
- int i, count;
-
- for_each_ring(ring, dev_priv, i) {
- i915_record_ring_state(dev, error, ring);
-
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
-
- error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
-
-
- i915_gem_record_active_context(ring, error, &error->ring[i]);
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list)
- count++;
-
- error->ring[i].num_requests = count;
- error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
- GFP_ATOMIC);
- if (error->ring[i].requests == NULL) {
- error->ring[i].num_requests = 0;
- continue;
- }
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
- struct drm_i915_error_request *erq;
-
- erq = &error->ring[i].requests[count++];
- erq->seqno = request->seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
- }
- }
-}
-
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-static void i915_capture_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- struct drm_i915_error_state *error;
- unsigned long flags;
- int i, pipe;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
- return;
-
- /* Account for pipe specific data like PIPE*STAT */
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- return;
- }
-
- DRM_INFO("capturing error event; look for more information in "
- "/sys/kernel/debug/dri/%d/i915_error_state\n",
- dev->primary->index);
-
- kref_init(&error->ref);
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- if (HAS_HW_CONTEXTS(dev))
- error->ccid = I915_READ(CCID);
-
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else if (IS_VALLEYVIEW(dev))
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
- else if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
-
- if (INTEL_INFO(dev)->gen >= 6)
- error->derrmr = I915_READ(DERRMR);
-
- if (IS_VALLEYVIEW(dev))
- error->forcewake = I915_READ(FORCEWAKE_VLV);
- else if (INTEL_INFO(dev)->gen >= 7)
- error->forcewake = I915_READ(FORCEWAKE_MT);
- else if (INTEL_INFO(dev)->gen == 6)
- error->forcewake = I915_READ(FORCEWAKE);
-
- if (!HAS_PCH_SPLIT(dev))
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- error->err_int = I915_READ(GEN7_ERR_INT);
-
- i915_get_extra_instdone(dev, error->extra_instdone);
-
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
-
- /* Record buffers on the active and pinned lists. */
- error->active_bo = NULL;
- error->pinned_bo = NULL;
-
- i = 0;
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
- i++;
- error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
- i++;
- error->pinned_bo_count = i - error->active_bo_count;
-
- error->active_bo = NULL;
- error->pinned_bo = NULL;
- if (i) {
- error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
- GFP_ATOMIC);
- if (error->active_bo)
- error->pinned_bo =
- error->active_bo + error->active_bo_count;
- }
-
- if (error->active_bo)
- error->active_bo_count =
- capture_active_bo(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
-
- if (error->pinned_bo)
- error->pinned_bo_count =
- capture_pinned_bo(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.bound_list);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (dev_priv->gpu_error.first_error == NULL) {
- dev_priv->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- i915_error_state_free(&error->ref);
-}
-
-void i915_destroy_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- kref_put(&error->ref, i915_error_state_free);
-}
-#else
-#define i915_capture_error_state(x)
-#endif
-
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1688,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- obj->gtt_offset;
+ i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -2202,29 +1735,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2275,21 +1793,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (pipe * 5));
+ ironlake_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2392,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
u32 tmp;
if (ring->hangcheck.acthd != acthd)
- return active;
+ return HANGCHECK_ACTIVE;
if (IS_GEN2(dev))
- return hung;
+ return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
@@ -2407,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
- return kick;
+ return HANGCHECK_KICK;
}
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(ring)) {
default:
- return hung;
+ return HANGCHECK_HUNG;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
- return kick;
+ return HANGCHECK_KICK;
case 0:
- return wait;
+ return HANGCHECK_WAIT;
}
}
- return hung;
+ return HANGCHECK_HUNG;
}
/**
@@ -2435,7 +1943,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2471,8 +1979,6 @@ void i915_hangcheck_elapsed(unsigned long data)
} else
busy = false;
} else {
- int score;
-
/* We always increment the hangcheck score
* if the ring is busy and still processing
* the same request, so that no single request
@@ -2492,21 +1998,19 @@ void i915_hangcheck_elapsed(unsigned long data)
acthd);
switch (ring->hangcheck.action) {
- case wait:
- score = 0;
+ case HANGCHECK_WAIT:
break;
- case active:
- score = BUSY;
+ case HANGCHECK_ACTIVE:
+ ring->hangcheck.score += BUSY;
break;
- case kick:
- score = KICK;
+ case HANGCHECK_KICK:
+ ring->hangcheck.score += KICK;
break;
- case hung:
- score = HUNG;
+ case HANGCHECK_HUNG:
+ ring->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
- ring->hangcheck.score += score;
}
} else {
/* Gradually reduce the count so that we catch DoS
@@ -2536,9 +2040,17 @@ void i915_hangcheck_elapsed(unsigned long data)
if (busy_count)
/* Reset timer case chip hangs without another request
* being added */
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies +
- DRM_I915_HANGCHECK_JIFFIES));
+ i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!i915_enable_hangcheck)
+ return;
+
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
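Editor's note: i915_queue_hangcheck centralizes what used to be two open-coded mod_timer calls: check the module parameter once, then re-arm the periodic timer rounded up to a jiffies boundary. The scoring switch above then accumulates per-ring penalties each tick until a reset is warranted. A compressed model of that escalation (the weights and threshold are illustrative, not the driver's exact values):

    #include <stdbool.h>

    /* Illustrative weights; the driver defines its own BUSY/KICK/HUNG. */
    enum { SCORE_BUSY = 1, SCORE_KICK = 5, SCORE_HUNG = 20, FIRE_AT = 30 };

    struct ring_hc { int score; unsigned last_acthd; };

    /* One hangcheck tick for a busy ring: no ACTHD progress raises the
     * score by the chosen penalty, progress lets it decay. */
    static bool hangcheck_tick(struct ring_hc *hc, unsigned acthd, int delta)
    {
        if (acthd == hc->last_acthd)
            hc->score += delta;         /* SCORE_BUSY, _KICK or _HUNG */
        else if (hc->score > 0)
            hc->score--;                /* gradually forgive          */

        hc->last_acthd = acthd;
        return hc->score >= FIRE_AT;    /* time to reset the GPU      */
    }
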
static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2072,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- atomic_set(&dev_priv->irq_received, 0);
-
- I915_WRITE(HWSTAM, 0xeffe);
-
- /* XXX hotplug from PCH */
-
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- POSTING_READ(DEIER);
+ struct drm_i915_private *dev_priv = dev->dev_private;
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
- ibx_irq_preinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* and PM */
+ I915_WRITE(GEN6_PMIMR, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0x0);
+ POSTING_READ(GEN6_PMIER);
+ }
}
-static void ivybridge_irq_preinstall(struct drm_device *dev)
+/* drm_dma.h hooks
+*/
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2592,21 +2099,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
- /* XXX hotplug from PCH */
-
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
-
- /* Power management */
- I915_WRITE(GEN6_PMIMR, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0x0);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_preinstall(dev);
ibx_irq_preinstall(dev);
}
@@ -2627,9 +2124,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
+
+ gen5_gt_irq_preinstall(dev);
I915_WRITE(DPINVGTT, 0xff);
@@ -2648,22 +2144,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
- u32 mask = ~I915_READ(SDEIMR);
- u32 hotplug;
+ u32 hotplug_irqs, hotplug, enabled_irqs = 0;
if (HAS_PCH_IBX(dev)) {
- mask &= ~SDE_HOTPLUG_MASK;
+ hotplug_irqs = SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_ibx[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
- mask &= ~SDE_HOTPLUG_MASK_CPT;
+ hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_cpt[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
- I915_WRITE(SDEIMR, ~mask);
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2195,103 @@ static void ibx_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIMR, ~mask);
}
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
-
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
- DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
- u32 gt_irqs;
-
- dev_priv->irq_mask = ~display_mask;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pm_irqs, gt_irqs;
- /* should always can generate irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask |
- DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
- POSTING_READ(DEIER);
+ pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
+ if (HAS_L3_GPU_CACHE(dev)) {
+ /* L3 parity interrupt is always unmasked. */
+ dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ }
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT;
-
- if (IS_GEN6(dev))
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- else
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN5(dev)) {
gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
ILK_BSD_USER_INTERRUPT;
+ } else {
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+ }
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, gt_irqs);
POSTING_READ(GTIER);
- ibx_irq_postinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ pm_irqs |= GEN6_PM_RPS_EVENTS;
- if (IS_IRONLAKE_M(dev)) {
- /* Enable PCU event interrupts
- *
- * spinlocking not required here for correctness since interrupt
- * setup is guaranteed to run in single-threaded context. But we
- * need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (HAS_VEBOX(dev))
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- return 0;
+ dev_priv->pm_irq_mask = 0xffffffff;
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ I915_WRITE(GEN6_PMIER, pm_irqs);
+ POSTING_READ(GEN6_PMIER);
+ }
}
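
The consolidated gen5_gt_irq_postinstall() above follows the driver's usual three-register sequence for each interrupt bank: ack anything pending in IIR, program the mask in IMR, enable delivery in IER, then do a posting read. A minimal, self-contained sketch of that relationship — all names here are hypothetical stand-ins, not the real MMIO interface:

#include <stdint.h>
#include <stdio.h>

struct irq_regs { uint32_t iir, imr, ier; };

static void irq_postinstall(struct irq_regs *r, uint32_t enable_bits)
{
        r->iir = 0;              /* ack pending events (write-to-clear) */
        r->imr = ~enable_bits;   /* unmask only what we enable */
        r->ier = enable_bits;    /* gate delivery */
}

static int irq_delivered(const struct irq_regs *r, uint32_t event)
{
        return (r->iir & event) && !(r->imr & event) && (r->ier & event);
}

int main(void)
{
        struct irq_regs gt = { 0 };

        irq_postinstall(&gt, (1u << 0) | (1u << 5));
        gt.iir |= 1u << 5;       /* hardware latches an event */
        printf("delivered: %d\n", irq_delivered(&gt, 1u << 5));
        return 0;
}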
-static int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
+ unsigned long irqflags;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask =
- DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
- DE_PLANEC_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB |
- DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB;
- u32 pm_irqs = GEN6_PM_RPS_EVENTS;
- u32 gt_irqs;
+ u32 display_mask, extra_mask;
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+ DE_ERR_INT_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
+
+ I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+ DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ }
dev_priv->irq_mask = ~display_mask;
	/* should always be able to generate irqs */
- I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER,
- display_mask |
- DE_PIPEC_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ I915_WRITE(DEIER, display_mask | extra_mask);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
- if (HAS_VEBOX(dev))
- pm_irqs |= PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
-
- /* Our enable/disable rps functions may touch these registers so
- * make sure to set a known state for only the non-RPS bits.
- * The RMW is extra paranoia since this should be called after being set
- * to a known state in preinstall.
- * */
- I915_WRITE(GEN6_PMIMR,
- (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
- I915_WRITE(GEN6_PMIER,
- (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_postinstall(dev);
ibx_irq_postinstall(dev);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Enable PCU event interrupts
+ *
+ * spinlocking not required here for correctness since interrupt
+ * setup is guaranteed to run in single-threaded context. But we
+ * need it to make the assert_spin_locked happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
return 0;
}
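
The IS_IRONLAKE_M() block moved above keeps its comment: the spinlock is taken not for correctness but because the helper asserts it is held. A tiny sketch of that contract, with a boolean standing in for dev_priv->irq_lock:

#include <assert.h>
#include <stdbool.h>

static bool irq_lock_held;           /* stand-in for the spinlock state */

static void enable_display_irq(int bit)
{
        assert(irq_lock_held);       /* mirrors assert_spin_locked() */
        (void)bit;                   /* ... unmask the bit in DEIMR ... */
}

int main(void)
{
        /* Setup runs single-threaded, but the helper still insists: */
        irq_lock_held = true;        /* spin_lock_irqsave() */
        enable_display_irq(0);
        irq_lock_held = false;       /* spin_unlock_irqrestore() */
        return 0;
}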
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 gt_irqs;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2317,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
+ gen5_gt_irq_postinstall(dev);
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3001,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
- int irq_received;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -3035,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3323,6 +2794,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 error_mask;
+ unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2813,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
* Enable some error detection, note the instruction error mask
@@ -3616,15 +3092,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- /* Share uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ivybridge_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -3683,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+
+/* Disable interrupts so we can allow Package C8+. */
+void hsw_pc8_disable_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
+ dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
+ dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
+ dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
+ dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
+
+ ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
+ ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+ ilk_disable_gt_irq(dev_priv, 0xffffffff);
+ snb_disable_pm_irq(dev_priv, 0xffffffff);
+
+ dev_priv->pc8.irqs_disabled = true;
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/* Restore interrupts so we can recover from Package C8+. */
+void hsw_pc8_restore_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t val, expected;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ val = I915_READ(DEIMR);
+ expected = ~DE_PCH_EVENT_IVB;
+ WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+ val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
+ expected = ~SDE_HOTPLUG_MASK_CPT;
+ WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
+ val, expected);
+
+ val = I915_READ(GTIMR);
+ expected = 0xffffffff;
+ WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+ val = I915_READ(GEN6_PMIMR);
+ expected = 0xffffffff;
+ WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
+ expected);
+
+ dev_priv->pc8.irqs_disabled = false;
+
+ ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
+ ibx_enable_display_interrupt(dev_priv,
+ ~dev_priv->pc8.regsave.sdeimr &
+ ~SDE_HOTPLUG_MASK_CPT);
+ ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
+ snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
+ I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
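+
The two PC8 helpers above rely on a save/park/verify/restore contract: disable saves each mask register and parks it at a known value, and restore WARNs if anything moved before re-applying the saved masks. A reduced model of that contract for a single register — the field names are stand-ins, not the driver's API:

#include <stdint.h>
#include <stdio.h>

static uint32_t deimr;               /* stand-in for one MMIO mask register */
struct regsave { uint32_t deimr; };

static void pc8_disable_interrupts(struct regsave *s, uint32_t parked)
{
        s->deimr = deimr;            /* save */
        deimr = parked;              /* park at a known, fully-masked value */
}

static int pc8_restore_interrupts(const struct regsave *s, uint32_t parked)
{
        if (deimr != parked) {       /* mirrors the WARN()s above */
                fprintf(stderr, "DEIMR is 0x%08x, not 0x%08x\n", deimr, parked);
                return -1;
        }
        deimr = s->deimr;            /* restore */
        return 0;
}

int main(void)
{
        struct regsave save;

        deimr = 0x1234;
        pc8_disable_interrupts(&save, 0xffffffffu);
        return pc8_restore_interrupts(&save, 0xffffffffu);
}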
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 342f1f33616..b6a58f720f9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,12 @@
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
+#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
+#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
+#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
+#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -363,6 +369,7 @@
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +687,7 @@
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -1127,7 +1135,8 @@
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -1440,6 +1449,8 @@
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
+#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1696,15 +1707,26 @@
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ * SNB BSpec has comments indicating that we should use the
+ * render context size instead if execlists are disabled, but
+ * based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ * doesn't need saving on GT1
+ */
#define CXT_SIZE 0x21a0
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
-#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
- GEN6_CXT_RING_SIZE(cxt_reg) + \
- GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
#define GEN7_CXT_SIZE 0x21a8
@@ -1714,11 +1736,7 @@
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
-#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
- GEN7_CXT_RING_SIZE(ctx_reg) + \
- GEN7_CXT_RENDER_SIZE(ctx_reg) + \
- GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
- GEN7_CXT_GT1_SIZE(ctx_reg) + \
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
/* Haswell does have the CXT_SIZE register, but it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
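
For reference, the trimmed GEN6_CXT_TOTAL_SIZE() above now sums only the ring, extended and pipeline fields. A worked example decoding one made-up CXT_SIZE readout with the same 6-bit field extractors:

#include <stdint.h>
#include <stdio.h>

#define CXT_RING(v)     (((v) >> 18) & 0x3f)
#define CXT_EXTENDED(v) (((v) >> 6)  & 0x3f)
#define CXT_PIPELINE(v) (((v) >> 0)  & 0x3f)

int main(void)
{
        uint32_t cxt = (5u << 18) | (24u << 6) | 3u;   /* made-up readout */

        printf("total = %u pages\n",
               CXT_RING(cxt) + CXT_EXTENDED(cxt) + CXT_PIPELINE(cxt));
        return 0;   /* prints: total = 32 pages */
}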
@@ -1778,6 +1796,71 @@
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL 0x64800
+#define EDP_PSR_ENABLE (1<<31)
+#define EDP_PSR_LINK_DISABLE (0<<27)
+#define EDP_PSR_LINK_STANDBY (1<<27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
+#define EDP_PSR_TP1_TP2_SEL (0<<11)
+#define EDP_PSR_TP1_TP3_SEL (1<<11)
+#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
+#define EDP_PSR_TP1_TIME_500us (0<<4)
+#define EDP_PSR_TP1_TIME_100us (1<<4)
+#define EDP_PSR_TP1_TIME_2500us (2<<4)
+#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+#define EDP_PSR_AUX_CTL 0x64810
+#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_DPCD_COMMAND 0x80060000
+#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
+#define EDP_PSR_AUX_DATA3 0x6481c
+#define EDP_PSR_AUX_DATA4 0x64820
+#define EDP_PSR_AUX_DATA5 0x64824
+
+#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
+#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
+#define EDP_PSR_STATUS_LINK_MASK (3<<26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
+#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+
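The PSR control bits above combine in the usual shifted-field style. An illustrative composition of an EDP_PSR_CTL value — constants copied from the defines; the idle-frame count of 2 is arbitrary, not a recommended setting:

#include <stdint.h>
#include <stdio.h>

#define PSR_ENABLE           (1u << 31)
#define PSR_LINK_STANDBY     (1u << 27)
#define PSR_TP1_TIME_100us   (1u << 4)
#define PSR_IDLE_FRAME_SHIFT 0

int main(void)
{
        uint32_t ctl = PSR_ENABLE | PSR_LINK_STANDBY |
                       PSR_TP1_TIME_100us | (2u << PSR_IDLE_FRAME_SHIFT);

        printf("EDP_PSR_CTL = 0x%08x\n", ctl);   /* 0x88000012 */
        return 0;
}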
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
@@ -2053,6 +2136,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -2200,6 +2284,8 @@
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
+#define HSW_BLC_PWM2_CTL 0x48350
+
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
@@ -2208,6 +2294,12 @@
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
+#define UTIL_PIN_CTL 0x48400
+#define UTIL_PIN_ENABLE (1 << 31)
+
+#define PCH_GTC_CTL 0xe7000
+#define PCH_GTC_ENABLE (1 << 31)
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -3121,9 +3213,6 @@
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
-#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
-#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
-#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
@@ -3170,12 +3259,6 @@
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24
-#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
-#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
-#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
-#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
-#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
-
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -3726,6 +3809,9 @@
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
+
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
@@ -3888,6 +3974,7 @@
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
@@ -4081,6 +4168,8 @@
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_VS_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(trans) \
@@ -4088,6 +4177,13 @@
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+#define HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+ _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
#define _PCH_TRANS_HSYNC_B 0xe1008
@@ -4476,6 +4572,10 @@
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define HSW_IDICR 0x9008
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+#define HSW_EDRAM_PRESENT 0x120010
+
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4744,8 +4844,8 @@
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
+#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
@@ -4866,7 +4966,8 @@
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
-#define SBI_DBUFF0_ENABLE (1<<0)
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4932,7 +5033,14 @@
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+
+#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
+#define D_COMP_COMP_FORCE (1<<8)
+#define D_COMP_COMP_DISABLE (1<<0)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c6..a777e7f3b0d 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct i915_error_state_file_priv error_priv;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ memset(&error_priv, 0, sizeof(error_priv));
+
+ ret = i915_error_state_buf_init(&error_str, count, off);
+ if (ret)
+ return ret;
+
+ error_priv.dev = dev;
+ i915_error_state_get(dev, &error_priv);
+
+ ret = i915_error_state_to_str(&error_str, &error_priv);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_put(&error_priv);
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
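+
error_state_attr above exposes the GPU error state as a binary sysfs file: reading returns the stringified error state, writing clears it. A hypothetical user-space reader — the card0 path assumes the first DRM minor:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/class/drm/card0/error", O_RDONLY);

        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
}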
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("gen6 sysfs setup failed\n");
}
+
+ ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+ &error_state_attr);
+ if (ret)
+ DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a681771..e2c5ee6f619 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
-TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
- TP_ARGS(obj, mappable),
+TRACE_EVENT(i915_vma_bind,
+ TP_PROTO(struct i915_vma *vma, bool mappable),
+ TP_ARGS(vma, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, offset=%08x size=%x%s",
+ TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
- __entry->mappable ? ", mappable" : "")
+ __entry->mappable ? ", mappable" : "",
+ __entry->vm)
);
-TRACE_EVENT(i915_gem_object_unbind,
- TP_PROTO(struct drm_i915_gem_object *obj),
- TP_ARGS(obj),
+TRACE_EVENT(i915_vma_unbind,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x",
- __entry->obj, __entry->offset, __entry->size)
+ TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
@@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
-TRACE_EVENT(i915_reg_rw,
- TP_PROTO(bool write, u32 reg, u64 val, int len),
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
- TP_ARGS(write, reg, val, len),
+ TP_CONDITION(trace),
TP_STRUCT__entry(
__field(u64, val)
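
The switch to TRACE_EVENT_CONDITION above lets i915_reg_rw stay compiled in while skipping untraced register accesses. A plain-C model of what TP_CONDITION(trace) does — the real macro generates this gate in the tracepoint fast path:

#include <stdbool.h>
#include <stdio.h>

static void trace_i915_reg_rw(bool write, unsigned reg, unsigned long val,
                              int len, bool trace)
{
        if (!trace)                  /* TP_CONDITION short-circuits here */
                return;
        printf("%s reg=0x%x, len=%d, val=0x%lx\n",
               write ? "write" : "read", reg, len, val);
}

int main(void)
{
        trace_i915_reg_rw(true, 0x2180, 0x1, 4, false);  /* skipped */
        trace_i915_reg_rw(true, 0x2180, 0x1, 4, true);   /* emitted */
        return 0;
}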
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bcbbaea2a78..57fe1ae32a0 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
-static int intel_dsm(acpi_handle handle, int func, int arg)
+static int intel_dsm(acpi_handle handle, int func)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = arg;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (ret) {
@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = 0;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
&output);
@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
- ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
if (ret < 0) {
DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;
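
The _DSM change above swaps the fourth argument from Integer 0 to an empty Package, which is what the ACPI spec expects when a function takes no payload. A compilable sketch of the argument construction with simplified stand-in types (not the kernel's acpi_object):

#include <stdio.h>

enum { ACPI_TYPE_INTEGER, ACPI_TYPE_PACKAGE };
struct acpi_obj { int type; long value; int count; struct acpi_obj *elements; };

static void fill_dsm_args(struct acpi_obj p[4], int func)
{
        p[2].type = ACPI_TYPE_INTEGER;
        p[2].value = func;
        p[3].type = ACPI_TYPE_PACKAGE;   /* was Integer 0 before the fix */
        p[3].count = 0;
        p[3].elements = NULL;
}

int main(void)
{
        struct acpi_obj params[4] = { 0 };

        fill_dsm_args(params, 0);        /* e.g. the supported-functions query */
        printf("arg3 type = %s\n",
               params[3].type == ACPI_TYPE_PACKAGE ? "Package" : "Integer");
        return 0;
}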
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3acec8c4816..b5a3875f22c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -52,15 +52,14 @@ struct intel_crt {
u32 adpa_reg;
};
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_crt, base);
+ return container_of(encoder, struct intel_crt, base);
}
-static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
- return container_of(encoder, struct intel_crt, base);
+ return intel_encoder_to_crt(intel_attached_encoder(connector));
}
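
Both helpers above reduce to container_of(): given a pointer to the embedded intel_encoder, recover the enclosing intel_crt. A standalone illustration of the same pattern with toy types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };
struct crt { unsigned adpa_reg; struct encoder base; };

int main(void)
{
        struct crt c = { .adpa_reg = 0x61100, .base = { .id = 7 } };
        struct encoder *e = &c.base;
        struct crt *back = container_of(e, struct crt, base);

        printf("adpa_reg = 0x%x\n", back->adpa_reg);
        return 0;
}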
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_crt_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_crt_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crt *crt =
- intel_encoder_to_crt(to_intel_encoder(encoder));
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (HAS_PCH_SPLIT(dev))
@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 0)
+ adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+ else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
I915_WRITE(crt->adpa_reg, adpa);
}
@@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
- .mode_set = intel_crt_mode_set,
-};
-
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
@@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
+ crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
@@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b042ee5c407..63aca49d11a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
- bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
- const u32 *ddi_translations = ((use_fdi_mode) ?
+ const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
- hsw_ddi_translations_dp);
+ hsw_ddi_translations_dp;
- DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
- port_name(port),
- use_fdi_mode ? "FDI" : "DP");
-
- WARN((use_fdi_mode && (port != PORT_E)),
- "Programming port %c in FDI mode, this probably will not work.\n",
- port_name(port));
-
- for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i = 0, reg = DDI_BUF_TRANS(port);
+ i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
if (!HAS_DDI(dev))
return;
- for (port = PORT_A; port < PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port, false);
-
- /* DDI E is the suggested one to work in FDI mode, so program is as such
- * by default. It will have to be re-programmed in case a digital DP
- * output will be detected on it
- */
- intel_prepare_ddi_buffers(dev, PORT_E, true);
+ for (port = PORT_A; port <= PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port);
}
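
The loop change above is an off-by-one fix: `port < PORT_E` stopped before port E, which then needed a special FDI-mode call, while `port <= PORT_E` covers A through E uniformly now that the translation table is chosen per port. Minimal illustration with hypothetical port ids:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

int main(void)
{
        for (enum port p = PORT_A; p <= PORT_E; p++)
                printf("program DDI buffers for port %c\n", 'A' + p);
        return 0;
}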
static const long hsw_ddi_buf_ctl_values[] = {
@@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
-static void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct intel_encoder *encoder)
{
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- int port = intel_ddi_get_encoder_port(intel_encoder);
- int pipe = intel_crtc->pipe;
- int type = intel_encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int port = intel_ddi_get_encoder_port(encoder);
+ int pipe = crtc->pipe;
+ int type = encoder->type;
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
- intel_crtc->eld_vld = false;
+ crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
+ enc_to_dig_port(&encoder->base);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
@@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
@@ -1118,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
+ intel_edp_psr_enable(intel_dp);
}
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_psr_disable(intel_dp);
ironlake_edp_backlight_off(intel_dp);
}
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
- if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ return 800000;
+ else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
- else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
- LCPLL_CLK_FREQ_450)
+ else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_ULT(dev_priv->dev))
return 337500;
@@ -1309,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
-static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
- .mode_set = intel_ddi_mode_set,
-};
-
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1337,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
intel_encoder->compute_config = intel_ddi_compute_config;
+ intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index be79f477a38..38452d82ac7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+
+static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+
+
typedef struct {
int min, max;
} intel_range_t;
@@ -54,7 +63,6 @@ typedef struct {
int p2_slow, p2_fast;
} intel_p2_t;
-#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
@@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev)
return 27;
}
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
@@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 2 },
};
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
+ else
+ limit = &intel_limits_i8xx_dac;
}
return limit;
}
@@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
- u32 updrate, minupdate, fracbits, p;
+ u32 updrate, minupdate, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
@@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
- fracbits = 1;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
@@ -892,8 +914,8 @@ static const char *state_string(bool enabled)
}
/* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-static struct intel_shared_dpll *
+struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
}
/* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
+ bool cur_state;
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+ cur_state = !!(val & FDI_RX_PLL_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
}
/* Need to check both planes against the pipe */
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note! This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
{
- int reg;
- u32 val;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
/* No really, not for ILK+ */
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
- assert_panel_unlocked(dev_priv, pipe);
+ assert_panel_unlocked(dev_priv, crtc->pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+ I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ I915_WRITE(reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DPLL_MD(crtc->pipe),
+ crtc->config.dpll_hw_state.dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
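
i9xx_enable_pll() keeps the driver's write / posting-read / delay idiom: the dummy read flushes the posted MMIO write so the warm-up delay actually starts after the hardware has seen the value. A reduced sketch of the idiom — fake_udelay and the register pointer are stand-ins for udelay() and an ioremap()ed slot:

#include <stdint.h>
#include <stdio.h>

static void fake_udelay(unsigned usecs) { (void)usecs; }

static void pll_write_warmup(volatile uint32_t *reg, uint32_t val,
                             void (*udelay)(unsigned))
{
        *reg = val;
        (void)*reg;              /* POSTING_READ(): flush posted writes */
        udelay(150);             /* wait for warmup */
}

int main(void)
{
        uint32_t dpll = 0;

        for (int i = 0; i < 3; i++)      /* "three times for luck" */
                pll_write_warmup(&dpll, 1u << 31, fake_udelay);
        printf("DPLL = 0x%08x\n", dpll);
        return 0;
}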
/**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
@@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* Note! This is for pre-ILK only.
*/
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- int reg;
- u32 val;
-
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
@@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(DPLL(pipe), 0);
+ POSTING_READ(DPLL(pipe));
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
@@ -1828,7 +1885,7 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_crtc->dspaddr_offset = linear_offset;
}
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
+ /* Update pipe size and adjust fitter if needed */
+ if (i915_fastboot) {
+ I915_WRITE(PIPESRC(intel_crtc->pipe),
+ ((crtc->mode.hdisplay - 1) << 16) |
+ (crtc->mode.vdisplay - 1));
+ if (!intel_crtc->config.pch_pfit.size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+ }
+ }
+
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, j;
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
I915_READ(FDI_RX_IIR(pipe)));
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ I915_WRITE(reg, temp);
- POSTING_READ(reg);
- udelay(150);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ I915_WRITE(reg, temp);
- for (i = 0; i < 4; i++) {
+ /* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- POSTING_READ(reg);
- udelay(500);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
- reg = FDI_RX_IIR(pipe);
+ reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
- break;
- }
- }
- if (i == 4)
- DRM_ERROR("FDI train 1 fail!\n");
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(reg, temp);
+ POSTING_READ(reg);
+ udelay(1); /* should be 0.5us */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- I915_WRITE(reg, temp);
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- POSTING_READ(reg);
- udelay(150);
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
- for (i = 0; i < 4; i++) {
+ /* Train 2 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
I915_WRITE(reg, temp);
POSTING_READ(reg);
- udelay(500);
+ udelay(2); /* should be 1.5us */
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
- break;
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
}
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
}
- if (i == 4)
- DRM_ERROR("FDI train 2 fail!\n");
+train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
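
The reworked ivb_manual_fdi_link_train() above wraps both training phases in a retry loop over the vswing/pre-emphasis table, trying each setting twice and restarting from pattern 1 on failure; only a train-2 success exits. Its control flow, reduced to hypothetical stubs:

#include <stdbool.h>
#include <stdio.h>

#define NPARAMS 4   /* stands in for ARRAY_SIZE(snb_b_fdi_train_param) */

static bool train1(int vswing) { return vswing >= 2; }  /* stub */
static bool train2(int vswing) { return vswing >= 3; }  /* stub */

int main(void)
{
        for (int j = 0; j < NPARAMS * 2; j++) {
                int vswing = j / 2;     /* each setting gets two attempts */

                if (!train1(vswing)) {
                        printf("FDI train 1 fail on vswing %d\n", vswing);
                        continue;
                }
                if (train2(vswing)) {
                        printf("FDI train done (vswing %d).\n", vswing);
                        return 0;
                }
                printf("FDI train 2 fail on vswing %d\n", vswing);
        }
        printf("FDI link training failed!\n");
        return 1;
}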
@@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that to have
- * the right LVDS enable sequence. */
- ironlake_enable_shared_dpll(intel_crtc);
-
+ /* We need to program the right clock selection before writing the pixel
+	 * multiplier into the DPLL. */
if (HAS_PCH_CPT(dev)) {
u32 sel;
@@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that to have
+ * the right LVDS enable sequence. */
+ ironlake_enable_shared_dpll(intel_crtc);
+
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
- i = crtc->pipe;
+ i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (pll->refcount == 0)
continue;
- if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(pll->id))) {
+ if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+ sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- /* Wait for the clocks to stabilize before rewriting the regs */
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- I915_WRITE(PCH_FP0(pll->id), fp);
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+ pll->mode_set(dev_priv, pll);
}
pll->refcount++;
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 temp;
WARN_ON(!crtc->enabled);
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
-
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
assert_fdi_rx_disabled(dev_priv, pipe);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
-
ironlake_pfit_enable(intel_crtc);
/*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
/* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- mutex_lock(&dev_priv->dpio_lock);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- intel_enable_pll(dev_priv, pipe);
+ vlv_enable_pll(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- /* VLV wants encoder enabling _before_ the pipe is up. */
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_fbc(dev);
- mutex_unlock(&dev_priv->dpio_lock);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- intel_enable_pll(dev_priv, pipe);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ i9xx_enable_pll(intel_crtc);
+
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- intel_disable_pll(dev_priv, pipe);
+ i9xx_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
intel_update_fbc(dev);
@@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
}
}
-void intel_modeset_disable(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled)
- intel_crtc_disable(crtc);
- }
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Simple dpms helper for encodres with just one connector, no cloning and only
+/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
-void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
@@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
- pipe_config->pipe_bpp == 24;
+ pipe_config->pipe_bpp <= 24;
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- /* All interlaced capable intel hw wants timings in frames. Note though
- * that intel_lvds_mode_fixup does some funny tricks with the crtc
- * timings, so we need to be careful not to clobber these.*/
- if (!pipe_config->timings_set)
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
@@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
return 200000;
}
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ return 267000;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ return 333000;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ return 444000;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ return 200000;
+ default:
+ DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ return 133000;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ return 167000;
+ }
+}
+
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
@@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
}
I915_WRITE(FP0(pipe), fp);
+ crtc->config.dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
+ crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
+ crtc->config.dpll_hw_state.fp1 = fp;
}
}
@@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll, mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_hdmi;
u32 coreclk, reg_val, dpll_md;
mutex_lock(&dev_priv->dpio_lock);
- is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
-
bestn = crtc->config.dpll.n;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2;
@@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
- 0x005f0021);
+ 0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x00d0000f);
@@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ crtc->config.dpll_hw_state.dpll = dpll;
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
@@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
}
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
@@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- if (crtc->config.has_dp_encoder)
- intel_dp_set_m_n(crtc);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
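+ /* Only the state is computed here; the actual DPLL register write is
+ * deferred to i9xx_enable_pll() in i9xx_crtc_enable(). */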
+ crtc->config.dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
+
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
}
static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
@@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
+static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+
+ crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+ crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
+ crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+ crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+
+ crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+ crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+ crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+ crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+
+ crtc->mode.flags = pipe_config->adjusted_mode.flags;
+
+ crtc->mode.clock = pipe_config->adjusted_mode.clock;
+}
+
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -4966,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ if (!IS_VALLEYVIEW(dev)) {
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
return true;
}
@@ -5119,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
BUG_ON(val != final);
}
-/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
- bool has_vga = false;
- bool is_sdv = false;
- u32 tmp;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- switch (encoder->type) {
- case INTEL_OUTPUT_ANALOG:
- has_vga = true;
- break;
- }
- }
-
- if (!has_vga)
- return;
-
- mutex_lock(&dev_priv->dpio_lock);
-
- /* XXX: Rip out SDV support once Haswell ships for real. */
- if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
- is_sdv = true;
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_DISABLE;
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ uint32_t tmp;
- udelay(24);
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
- if (!is_sdv) {
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
-
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
- 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
- }
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+ uint32_t tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
- tmp |= 0x7FFF;
- intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,17 +5215,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
- }
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
@@ -5253,34 +5245,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
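+ * (lpt_init_pch_refclk() below picks the FDI variant - spread and FDI
+ * both enabled - whenever a VGA encoder is present.)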
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+ bool with_fdi)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
+
+ if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ with_spread = true;
+ if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ udelay(24);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ if (with_fdi) {
+ lpt_reset_fdi_mphy(dev_priv);
+ lpt_program_fdi_mphy(dev_priv);
+ }
}
- /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
- tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
- tmp |= SBI_DBUFF0_ENABLE;
- intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
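+ /* Park the clock path (PATHALT) before asserting DISABLE; the 32us
+ * delay presumably comes from the BSpec disable sequence. */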
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
mutex_unlock(&dev_priv->dpio_lock);
}
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (has_vga)
+ lpt_enable_clkout_dp(dev, true, true);
+ else
+ lpt_disable_clkout_dp(dev);
+}
+
/*
* Initialize reference clocks when the driver loads
*/
@@ -5610,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+ pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
@@ -5720,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
if (is_lvds && has_reduced_clock && i915_powersave)
intel_crtc->lowfreq_avail = true;
else
@@ -5732,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- if (has_reduced_clock)
- I915_WRITE(PCH_FP1(pll->id), fp2);
- else
- I915_WRITE(PCH_FP1(pll->id), fp);
}
intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
- /* XXX: Can't properly read out the pch dpll pixel multiplier
- * since we don't have state tracking for pch clocks yet. */
- pipe_config->pixel_multiplier = 1;
-
if (HAS_PCH_IBX(dev_priv->dev)) {
- pipe_config->shared_dpll = crtc->pipe;
+ pipe_config->shared_dpll =
+ (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
+
+ tmp = pipe_config->dpll_hw_state.dpll;
+ pipe_config->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5867,6 +5926,305 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
return true;
}
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *crtc;
+ unsigned long irqflags;
+ uint32_t val;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ WARN(plls->spll_refcount, "SPLL enabled\n");
+ WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+ WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ val = I915_READ(DEIMR);
+ WARN((val & ~DE_PCH_EVENT_IVB) != val,
+ "Unexpected DEIMR bits enabled: 0x%x\n", val);
+ val = I915_READ(SDEIMR);
+ WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
+ "Unexpected SDEIMR bits enabled: 0x%x\n", val);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
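+ * hsw_restore_lcpll() below undoes this sequence.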
+ */
+void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ uint32_t val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ POSTING_READ(D_COMP);
+ ndelay(100);
+
+ if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /* Make sure we're not in PC8 state before disabling PC8, otherwise
+ * we'll hang the machine! */
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ POSTING_READ(D_COMP);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+}
+
+void hsw_enable_pc8_work(struct work_struct *__work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(to_delayed_work(__work), struct drm_i915_private,
+ pc8.enable_work);
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ if (dev_priv->pc8.enabled)
+ return;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ dev_priv->pc8.enabled = true;
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev);
+ hsw_pc8_disable_interrupts(dev);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
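+/*
+ * pc8.disable_count acts as a disable-refcount: PC8+ is only (re)armed,
+ * via delayed work, once the last disabler drops its reference.
+ */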
+static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+ WARN(dev_priv->pc8.disable_count < 1,
+ "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+ dev_priv->pc8.disable_count--;
+ if (dev_priv->pc8.disable_count != 0)
+ return;
+
+ schedule_delayed_work(&dev_priv->pc8.enable_work,
+ msecs_to_jiffies(i915_pc8_timeout));
+}
+
+static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+ WARN(dev_priv->pc8.disable_count < 0,
+ "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+ dev_priv->pc8.disable_count++;
+ if (dev_priv->pc8.disable_count != 1)
+ return;
+
+ cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+ if (!dev_priv->pc8.enabled)
+ return;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ hsw_pc8_restore_interrupts(dev);
+ lpt_init_pch_refclk(dev);
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ intel_prepare_ddi(dev);
+ i915_gem_init_swizzling(dev);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ dev_priv->pc8.enabled = false;
+}
+
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->pc8.lock);
+ __hsw_enable_package_c8(dev_priv);
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->pc8.lock);
+ __hsw_disable_package_c8(dev_priv);
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+ uint32_t val;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ if (crtc->base.enabled)
+ return false;
+
+ /* This case is still possible: the i915.disable_power_well parameter
+ * may be set, or the KVMr (or something else) might be requesting the
+ * power well. */
+ val = I915_READ(HSW_PWR_WELL_DRIVER);
+ if (val != 0) {
+ DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Since we're called from modeset_global_resources there's no way to
+ * symmetrically increase and decrease the refcount, so we use
+ * dev_priv->pc8.requirements_met to track whether we already have the refcount
+ * or not.
+ */
+static void hsw_update_package_c8(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool allow;
+
+ if (!i915_enable_pc8)
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
+
+ allow = hsw_can_enable_package_c8(dev_priv);
+
+ if (allow == dev_priv->pc8.requirements_met)
+ goto done;
+
+ dev_priv->pc8.requirements_met = allow;
+
+ if (allow)
+ __hsw_enable_package_c8(dev_priv);
+ else
+ __hsw_disable_package_c8(dev_priv);
+
+done:
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->pc8.gpu_idle) {
+ dev_priv->pc8.gpu_idle = true;
+ hsw_enable_package_c8(dev_priv);
+ }
+}
+
+static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->pc8.gpu_idle) {
+ dev_priv->pc8.gpu_idle = false;
+ hsw_disable_package_c8(dev_priv);
+ }
+}
+
static void haswell_modeset_global_resources(struct drm_device *dev)
{
bool enable = false;
@@ -5882,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
}
intel_set_power_well(dev, enable);
+
+ hsw_update_package_c8(dev);
}
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -5935,7 +6295,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6005,11 +6365,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
@@ -6028,12 +6385,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
- if (encoder->mode_set) {
- encoder->mode_set(encoder);
- } else {
- encoder_funcs = encoder->base.helper_private;
- encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
- }
+ encoder->mode_set(encoder);
}
return 0;
@@ -6548,7 +6900,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = obj->gtt_offset;
+ addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -6570,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
- i915_gem_object_unpin(intel_crtc->cursor_bo);
+ i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
@@ -6585,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
@@ -6875,11 +7227,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
/* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
@@ -6918,7 +7271,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- return 0;
+ pipe_config->adjusted_mode.clock = 0;
+ return;
}
if (IS_PINEVIEW(dev))
@@ -6955,12 +7309,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
}
- /* XXX: It would be nice to validate the clocks, but we can't reuse
- * i830PllIsValid() because it relies on the xf86_config connector
- * configuration being accurate, which it isn't necessarily.
+ pipe_config->adjusted_mode.clock = clock.dot *
+ pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ int link_freq, repeat;
+ u64 clock;
+ u32 link_m, link_n;
+
+ repeat = pipe_config->pixel_multiplier;
+
+ /*
+ * The calculation for the data clock is:
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+ * But we want to avoid losing precision if possible, so:
+ * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+ *
+ * and the link clock is simpler:
+ * link_clock = (m * link_clock * repeat) / n
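+ *
+ * Worked example (illustrative numbers only): link_m = 2, link_n = 3,
+ * link_freq = 270000 and repeat = 1 give
+ * clock = (2 * 270000 * 1) / 3 = 180000.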
+ */
+
+ /*
+ * We need to get the FDI or DP link clock here to derive
+ * the M/N dividers.
+ *
+ * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+ * For DP, it's either 1.62GHz or 2.7GHz.
+ * We do our calculations in 10*MHz since we don't need much precision.
*/
+ if (pipe_config->has_pch_encoder)
+ link_freq = intel_fdi_link_freq(dev) * 10000;
+ else
+ link_freq = pipe_config->port_clock;
- return clock.dot;
+ link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+ link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+ if (!link_m || !link_n)
+ return;
+
+ clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+ do_div(clock, link_n);
+
+ pipe_config->adjusted_mode.clock = clock;
}
/** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7368,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
+ struct intel_crtc_config pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7378,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
if (!mode)
return NULL;
- mode->clock = intel_crtc_clock_get(dev, crtc);
+ /*
+ * Construct a pipe_config sufficient for getting the clock info
+ * back out of crtc_clock_get.
+ *
+ * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+ * to use a real value here instead.
+ */
+ pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ pipe_config.pixel_multiplier = 1;
+ i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+ mode->clock = pipe_config.adjusted_mode.clock;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7064,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
void intel_mark_busy(struct drm_device *dev)
{
- i915_update_gfx_val(dev->dev_private);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ hsw_package_c8_gpu_busy(dev_priv);
+ i915_update_gfx_val(dev_priv);
}
void intel_mark_idle(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ hsw_package_c8_gpu_idle(dev_priv);
+
if (!i915_powersave)
return;
@@ -7235,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7263,7 +7679,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7279,7 +7695,8 @@ err:
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7304,7 +7721,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7320,7 +7737,8 @@ err:
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7344,7 +7762,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7368,7 +7786,8 @@ err:
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7387,7 +7806,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7418,7 +7837,8 @@ err:
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7452,7 +7872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
@@ -7468,14 +7888,16 @@ err:
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
return -ENODEV;
}
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7545,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
if (ret)
goto cleanup_pending;
@@ -7789,7 +8211,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp, ret = -EINVAL;
@@ -7806,9 +8227,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
- pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+ pipe_config->cpu_transcoder =
+ (enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ /*
+ * Sanitize sync polarity flags based on requested ones. If neither
+ * positive nor negative polarity is requested, treat this as meaning
+ * negative polarity.
+ */
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
@@ -7823,6 +8258,9 @@ encoder_retry:
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
+ /* Fill in default crtc timings, allow encoders to overwrite them. */
+ drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -7833,20 +8271,8 @@ encoder_retry:
if (&encoder->new_crtc->base != crtc)
continue;
- if (encoder->compute_config) {
- if (!(encoder->compute_config(encoder, pipe_config))) {
- DRM_DEBUG_KMS("Encoder config failure\n");
- goto fail;
- }
-
- continue;
- }
-
- encoder_funcs = encoder->base.helper_private;
- if (!(encoder_funcs->mode_fixup(&encoder->base,
- &pipe_config->requested_mode,
- &pipe_config->adjusted_mode))) {
- DRM_DEBUG_KMS("Encoder fixup failed\n");
+ if (!(encoder->compute_config(encoder, pipe_config))) {
+ DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
}
@@ -8041,6 +8467,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
}
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+ struct intel_crtc_config *new)
+{
+ int clock1, clock2, diff;
+
+ clock1 = cur->adjusted_mode.clock;
+ clock2 = new->adjusted_mode.clock;
+
+ if (clock1 == clock2)
+ return true;
+
+ if (!clock1 || !clock2)
+ return false;
+
+ diff = abs(clock1 - clock2);
+
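+ /* Integer-math form of: accept when diff is below 5% of
+ * (clock1 + clock2), i.e. roughly within 10% of either clock. */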
+ if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
+ return true;
+
+ return false;
+}
+
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
@@ -8072,7 +8520,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- DRM_ERROR("mismatch in " #name " " \
+ DRM_ERROR("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -8106,8 +8554,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
- if (!HAS_PCH_SPLIT(dev))
- PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8585,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
@@ -8146,6 +8594,15 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK
+ if (!IS_HASWELL(dev)) {
+ if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+ DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+ current_config->adjusted_mode.clock,
+ pipe_config->adjusted_mode.clock);
+ return false;
+ }
+ }
+
return true;
}
@@ -8277,6 +8734,9 @@ check_crtc_state(struct drm_device *dev)
encoder->get_config(encoder, &pipe_config);
}
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc, &pipe_config);
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
@@ -8454,9 +8914,9 @@ out:
return ret;
}
-int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
int ret;
@@ -8573,8 +9033,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
- config->mode_changed = true;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(set->crtc);
+
+ if (intel_crtc->active && i915_fastboot) {
+ DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+ config->fb_changed = true;
+ } else {
+ DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+ config->mode_changed = true;
+ }
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
@@ -8594,6 +9062,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
+
+ DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+ set->crtc->base.id, config->mode_changed, config->fb_changed);
}
static int
@@ -8604,14 +9075,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
- int count, ro;
+ int ro;
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
@@ -8645,7 +9115,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder)
@@ -8804,19 +9273,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t reg, val;
-
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8825,7 +9307,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
- uint32_t reg, val;
/* Make sure no transcoder isn't still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8833,11 +9314,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8856,6 +9334,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+ dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
dev_priv->shared_dplls[i].get_hw_state =
@@ -9035,8 +9514,13 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
+ PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
+ PORT_C);
+ }
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@@ -9096,13 +9580,17 @@ static void intel_setup_outputs(struct drm_device *dev)
drm_helper_move_panel_connectors_to_head(dev);
}
+void intel_framebuffer_fini(struct intel_framebuffer *fb)
+{
+ drm_framebuffer_cleanup(&fb->base);
+ drm_gem_object_unreference_unlocked(&fb->obj->base);
+}
+
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
-
+ intel_framebuffer_fini(intel_fb);
kfree(intel_fb);
}
@@ -9272,6 +9760,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9279,6 +9768,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9286,6 +9776,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9303,9 +9794,12 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
@@ -9586,7 +10080,7 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
@@ -9792,6 +10286,17 @@ void i915_redisable_vga(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ /* This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses. */
+ if (HAS_POWER_WELL(dev) &&
+ (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+ return;
+
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
@@ -9862,6 +10367,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (!crtc->active)
+ continue;
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc,
+ &crtc->config);
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
@@ -9893,6 +10407,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
+ /*
+ * Now that we have the config, copy it to each CRTC struct.
+ * Note that this could go away if we move to using crtc_config
+ * checking everywhere.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (crtc->active && i915_fastboot) {
+ intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+ DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+ crtc->base.base.id);
+ drm_mode_debug_printmodeline(&crtc->base.mode);
+ }
+ }
+
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
@@ -9955,7 +10485,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -9979,7 +10508,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
@@ -10035,9 +10563,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_display_error_state {
u32 power_well_driver;
@@ -10151,8 +10676,7 @@ intel_display_capture_error_state(struct drm_device *dev)
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
- if (HAS_POWER_WELL(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ intel_uncore_clear_errors(dev);
return error;
}
@@ -10209,4 +10733,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
-#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a5..2151d13772b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size)
+static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
+ int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
- uint32_t ch_data = ch_ctl + 4;
- int i, ret, recv_bytes;
- uint32_t status;
- uint32_t aux_clock_divider;
- int try, precharge;
- bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- pm_qos_update_request(&dev_priv->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
@@ -307,29 +291,61 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (IS_VALLEYVIEW(dev)) {
- aux_clock_divider = 100;
+ return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
if (HAS_DDI(dev))
- aux_clock_divider = DIV_ROUND_CLOSEST(
- intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
- aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+ return 200; /* SNB & IVB eDP input clock at 400MHz */
else
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ return 225; /* eDP input clock at 450MHz */
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
- aux_clock_divider = 74;
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
} else if (HAS_PCH_SPLIT(dev)) {
- aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
- aux_clock_divider = intel_hrawclk(dev) / 2;
+ return index ? 0 : intel_hrawclk(dev) / 2;
}
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t ch_data = ch_ctl + 4;
+ uint32_t aux_clock_divider;
+ int i, ret, recv_bytes;
+ uint32_t status;
+ int try, precharge, clock = 0;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+ intel_dp_check_edp(intel_dp);
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
+ intel_aux_display_runtime_get(dev_priv);
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ_NOTRACE(ch_ctl);
@@ -345,37 +361,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
-
- /* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
-
- /* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR))
- continue;
+ while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -416,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
ret = recv_bytes;
out:
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+ intel_aux_display_runtime_put(dev_priv);
return ret;
}
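
The refactor above replaces a single fixed AUX clock divider with an indexed candidate list: get_aux_clock_divider() is queried with 0, 1, 2, ... until it returns 0, and the whole five-attempt transfer loop runs once per candidate (the non-ULT HSW workaround is what needs two values, 63 and 72). A hedged sketch of that control flow, with get_divider() and try_transfer() standing in for the real helpers:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t get_divider(int index)        /* 0 terminates the list */
    {
        static const uint32_t candidates[] = { 63, 72 };
        return index < 2 ? candidates[index] : 0;
    }

    static bool try_transfer(uint32_t divider)    /* one batch of attempts */
    {
        return divider == 72;  /* pretend only the second divider works */
    }

    static bool aux_transfer(void)
    {
        uint32_t divider;
        int clock = 0;

        /* Each candidate divider gets a full retry loop of its own. */
        while ((divider = get_divider(clock++)) != 0)
            if (try_transfer(divider))
                return true;
        return false;
    }
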
@@ -710,8 +731,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp_bpp);
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+ }
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -812,15 +836,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
udelay(500);
}
-static void
-intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dp_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
/*
* There are four kinds of DP registers:
@@ -852,7 +875,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
@@ -1360,6 +1383,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
pipe_config->adjusted_mode.flags |= flags;
+
+ if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+ if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+}
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) &&
+ intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+ /* As per BSpec (Pipe Video Data Island Packet), we must disable
+ the video DIP being updated before programming the video DIP data
+ buffer registers for the DIP being updated. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_vsc_psr psr_vsc;
+
+ if (intel_dp->psr_setup_done)
+ return;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD);
+
+ intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+ int precharge = 0x3;
+ int msg_size = 5; /* Header(4) + Message(1) */
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE &
+ ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE |
+ DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL,
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
+ I915_WRITE(EDP_PSR_CTL, val |
+ EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_i915_gem_object *obj;
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!IS_HASWELL(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ dev_priv->no_psr_reason = PSR_NO_SOURCE;
+ return false;
+ }
+
+ if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+ (dig_port->port != PORT_A)) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
+ return false;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ dev_priv->no_psr_reason = PSR_NO_SINK;
+ return false;
+ }
+
+ if (!i915_enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ dev_priv->no_psr_reason = PSR_MODULE_PARAM;
+ return false;
+ }
+
+ crtc = dig_port->base.base.crtc;
+ if (crtc == NULL) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+ dev_priv->no_psr_reason = PSR_NOT_TILED;
+ return false;
+ }
+
+ if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+ dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
+ return false;
+ }
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ dev_priv->no_psr_reason = PSR_S3D_ENABLED;
+ return false;
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (!intel_edp_psr_match_conditions(intel_dp) ||
+ intel_edp_is_psr_enabled(dev))
+ return;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ /* Enable PSR on the panel */
+ intel_edp_psr_enable_sink(intel_dp);
+
+ /* Enable PSR on the host */
+ intel_edp_psr_enable_source(intel_dp);
+}
+
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp) &&
+ !intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!intel_edp_is_psr_enabled(dev))
+ return;
+
+ I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!is_edp_psr(intel_dp))
+ return;
+
+ if (!intel_edp_psr_match_conditions(intel_dp))
+ intel_edp_psr_disable(intel_dp);
+ else if (!intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+ }
}
static void intel_disable_dp(struct intel_encoder *encoder)
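
The PSR plumbing added above follows a strict order: validate the conditions, do the one-time setup (VSC SDP header plus debug masks), arm the sink over DPCD, then enable the source. A compressed, self-contained sketch of that sequencing (every helper here is a stub standing in for the intel_edp_psr_* functions):

    #include <stdbool.h>

    static bool setup_done;       /* mirrors intel_dp->psr_setup_done */
    static bool source_enabled;   /* mirrors the EDP_PSR_ENABLE bit */

    static bool conditions_ok(void) { return true; } /* match_conditions() */
    static void setup_once(void)                     /* VSC SDP + debug masks */
    {
        if (setup_done)
            return;
        setup_done = true;
    }
    static void enable_sink(void)   { /* DPCD write: DP_PSR_EN_CFG */ }
    static void enable_source(void) { source_enabled = true; /* EDP_PSR_CTL */ }

    static void psr_enable(void)
    {
        if (!conditions_ok() || source_enabled)
            return;

        setup_once();      /* one-time per connector */
        enable_sink();     /* the panel must be ready first */
        enable_source();   /* only then does the host start entering PSR */
    }
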
@@ -1411,47 +1703,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
+}
- if (IS_VALLEYVIEW(dev)) {
- struct intel_digital_port *dport =
- enc_to_dig_port(&encoder->base);
- int channel = vlv_dport_to_channel(dport);
-
- vlv_wait_port_ready(dev_priv, channel);
- }
+static void vlv_enable_dp(struct intel_encoder *encoder)
+{
}
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+
+ if (dport->port == PORT_A)
+ ironlake_edp_pll_on(intel_dp);
+}
+
+static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
- if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
- ironlake_edp_pll_on(intel_dp);
+ mutex_lock(&dev_priv->dpio_lock);
- if (IS_VALLEYVIEW(dev)) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
-
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
- 0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
- 0x00400888);
- }
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, port);
}
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1465,6 +1760,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1478,6 +1774,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -1689,6 +1986,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@@ -1697,6 +1995,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+ mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
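
Several hunks above wrap DPIO sideband accesses in dev_priv->dpio_lock: the bus is shared, so a read-modify-write burst must stay under the mutex from the first read to the last write. A small sketch of the pattern (the dpio helpers and register array are stand-ins):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t dpio_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t dpio_regs[64];                 /* stand-in sideband space */

    static uint32_t dpio_read(int reg)            { return dpio_regs[reg]; }
    static void dpio_write(int reg, uint32_t val) { dpio_regs[reg] = val; }

    static void dpio_update(int reg, uint32_t set_bits)
    {
        pthread_mutex_lock(&dpio_lock);   /* the sideband bus is shared */
        dpio_write(reg, dpio_read(reg) | set_bits);
        pthread_mutex_unlock(&dpio_lock); /* whole burst stays locked */
    }
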
@@ -2030,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
- bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
@@ -2048,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
- clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
@@ -2069,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
- clock_recovery = true;
break;
}
@@ -2275,6 +2571,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
+ /* Check if the panel supports PSR */
+ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+ intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (is_edp_psr(intel_dp))
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2542,6 +2845,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct edid *edid = NULL;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -2735,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .mode_set = intel_dp_mode_set,
-};
-
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
@@ -3166,6 +3468,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
+ intel_dp->psr_setup_done = false;
+
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
if (is_edp(intel_dp)) {
@@ -3216,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ } else {
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->enable = intel_enable_dp;
+ }
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b7d6e09456c..176080822a7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
+#include <linux/hdmi.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@@ -208,10 +209,6 @@ struct intel_crtc_config {
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
- /* This flag must be set by the encoder's compute_config callback if it
- * changes the crtc timings in the mode to prevent the crtc fixup from
- * overwriting them. Currently only lvds needs that. */
- bool timings_set;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@@ -334,6 +331,13 @@ struct intel_crtc {
bool pch_fifo_underrun_disabled;
};
+struct intel_plane_wm_parameters {
+ uint32_t horiz_pixels;
+ uint8_t bytes_per_pixel;
+ bool enabled;
+ bool scaled;
+};
+
struct intel_plane {
struct drm_plane base;
int plane;
@@ -352,20 +356,18 @@ struct intel_plane {
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
- struct {
- bool enable;
- uint8_t bytes_per_pixel;
- uint32_t horiz_pixels;
- } wm;
+ struct intel_plane_wm_parameters wm;
void (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
- void (*disable_plane)(struct drm_plane *plane);
+ void (*disable_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@@ -397,66 +399,6 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define DIP_HEADER_SIZE 5
-
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
-#define DIP_AVI_PR_1 0
-#define DIP_AVI_PR_2 1
-#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
-
-#define DIP_TYPE_SPD 0x83
-#define DIP_VERSION_SPD 0x1
-#define DIP_LEN_SPD 25
-#define DIP_SPD_UNKNOWN 0
-#define DIP_SPD_DSTB 0x1
-#define DIP_SPD_DVDP 0x2
-#define DIP_SPD_DVHS 0x3
-#define DIP_SPD_HDDVR 0x4
-#define DIP_SPD_DVC 0x5
-#define DIP_SPD_DSC 0x6
-#define DIP_SPD_VCD 0x7
-#define DIP_SPD_GAME 0x8
-#define DIP_SPD_PC 0x9
-#define DIP_SPD_BD 0xa
-#define DIP_SPD_SCD 0xb
-
-struct dip_infoframe {
- uint8_t type; /* HB0 */
- uint8_t ver; /* HB1 */
- uint8_t len; /* HB2 - body len, not including checksum */
- uint8_t ecc; /* Header ECC */
- uint8_t checksum; /* PB0 */
- union {
- struct {
- /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
- uint8_t Y_A_B_S;
- /* PB2 - C 7:6, M 5:4, R 3:0 */
- uint8_t C_M_R;
- /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
- uint8_t ITC_EC_Q_SC;
- /* PB4 - VIC 6:0 */
- uint8_t VIC;
- /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
- uint8_t YQ_CN_PR;
- /* PB6 to PB13 */
- uint16_t top_bar_end;
- uint16_t bottom_bar_start;
- uint16_t left_bar_end;
- uint16_t right_bar_start;
- } __attribute__ ((packed)) avi;
- struct {
- uint8_t vn[8];
- uint8_t pd[16];
- uint8_t sdi;
- } __attribute__ ((packed)) spd;
- uint8_t payload[27];
- } __attribute__ ((packed)) body;
-} __attribute__((packed));
-
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
@@ -467,7 +409,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@@ -487,6 +430,7 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
@@ -498,6 +442,7 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ bool psr_setup_done;
struct intel_connector *attached_connector;
};
@@ -549,13 +494,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_fbc_work {
- struct delayed_work work;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- int interval;
-};
-
int intel_pch_rawclk(struct drm_device *dev);
int intel_connector_update_modes(struct drm_connector *connector,
@@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
-extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
@@ -639,14 +576,10 @@ struct intel_set_config {
bool mode_changed;
};
-extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
-extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
@@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_enable_clock_gating(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
+extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width,
- int pixel_size, bool enable);
+extern void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled);
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
@@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,8 +744,8 @@ extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
+void gen6_update_ring_freq(struct drm_device *dev);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
@@ -825,4 +773,24 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
+extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_update(struct drm_device *dev);
+extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down);
+extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask);
+extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask);
+extern void hsw_enable_pc8_work(struct work_struct *__work);
+extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
+extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
+extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_DRV_H__ */
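
The header now exports hardware-state asserts as one bool-taking function plus enabled/disabled convenience macros (assert_shared_dpll and friends). The shape of that pattern, reduced to standard C:

    #include <assert.h>
    #include <stdbool.h>

    /* In i915 this would be a WARN(); plain assert() keeps the sketch small. */
    static void check_state(bool cur, bool expected)
    {
        assert(cur == expected);
    }

    #define check_enabled(cur)  check_state((cur), true)
    #define check_disabled(cur) check_state((cur), false)
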
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7..406303b509c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -100,15 +100,14 @@ struct intel_dvo {
bool panel_wants_dither;
};
-static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dvo, base.base);
+ return container_of(encoder, struct intel_dvo, base);
}
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dvo, base);
+ return enc_to_dvo(intel_attached_encoder(connector));
}
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
-static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them into
* the adjusted mode. The CRTC will be set up for this mode,
@@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
}
if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
+ &pipe_config->requested_mode,
+ adjusted_mode);
return true;
}
-static void intel_dvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dvo_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
@@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &crtc->config.requested_mode,
+ adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +334,8 @@ static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .mode_fixup = intel_dvo_mode_fixup,
- .mode_set = intel_dvo_mode_set,
-};
-
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
@@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
+ intel_encoder->compute_config = intel_dvo_compute_config;
+ intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
/* Now, try to find a controller */
@@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_dvo_helper_funcs);
-
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
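
The enc_to_dvo() cleanup above works because struct intel_dvo embeds its intel_encoder, so container_of() can walk from the inner pointer back to the outer struct. A standalone sketch (the struct layout here is illustrative, not the i915 one):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encoder { int id; };

    struct dvo {
        struct encoder base;      /* embedded base object */
        int panel_wants_dither;
    };

    static struct dvo *enc_to_dvo(struct encoder *enc)
    {
        return container_of(enc, struct dvo, base);
    }
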
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387..bc2100007b2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- obj->gtt_offset, obj);
+ i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
@@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
- struct fb_info *info;
- struct intel_framebuffer *ifb = &ifbdev->ifb;
-
if (ifbdev->helper.fbdev) {
- info = ifbdev->helper.fbdev;
+ struct fb_info *info = ifbdev->helper.fbdev;
+
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
+
framebuffer_release(info);
}
drm_fb_helper_fini(&ifbdev->helper);
- drm_framebuffer_unregister_private(&ifb->base);
- drm_framebuffer_cleanup(&ifb->base);
- if (ifb->obj) {
- drm_gem_object_unreference_unlocked(&ifb->obj->base);
- ifb->obj = NULL;
- }
+ drm_framebuffer_unregister_private(&ifbdev->ifb.base);
+ intel_framebuffer_fini(&ifbdev->ifb);
}
int intel_fbdev_init(struct drm_device *dev)
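
One detail in the intel_fb.c hunk above: the debug format changes from %08x to %08lx because i915_gem_obj_ggtt_offset() returns an unsigned long, and a mismatched printf specifier is undefined behaviour. A trivial illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long offset = 0x12345678UL;

        /* printf("0x%08x\n", offset);  wrong: %x expects unsigned int */
        printf("0x%08lx\n", offset);  /* %lx matches unsigned long */
        return 0;
    }
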
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2fd3fd5b943..4148cc85bf7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
{
- uint8_t *data = (uint8_t *)frame;
- uint8_t sum = 0;
- unsigned i;
-
- frame->checksum = 0;
- frame->ecc = 0;
-
- for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
- sum += data[i];
-
- frame->checksum = 0x100 - sum;
-}
-
-static u32 g4x_infoframe_index(struct dip_infoframe *frame)
-{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_SELECT_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_SELECT_VENDOR;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VENDOR;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VS_HSW;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(VIDEO_DIP_CTL, val);
@@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
- if (frame->type != DIP_TYPE_AVI)
- val &= ~g4x_infoframe_enable(frame);
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
- u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
- unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 data_reg;
+ int i;
u32 val = I915_READ(ctl_reg);
+ data_reg = hsw_infoframe_data_reg(type,
+ intel_crtc->config.cpu_transcoder);
if (data_reg == 0)
return;
- val &= ~hsw_infoframe_enable(frame);
+ val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
mmiowb();
@@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, 0);
mmiowb();
- val |= hsw_infoframe_enable(frame);
+ val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
-static void intel_set_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1: DB3 | DB2 | DB1 | DB0
+ * DW2: DB7 | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware-specific hole, so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+ union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ ssize_t len;
+
+ /* see comment above for the reason for this offset */
+ len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (len < 0)
+ return;
- intel_dip_infoframe_csum(frame);
- intel_hdmi->write_infoframe(encoder, frame);
+ /* Insert the 'hole' (see big comment above) at position 3 */
+ buffer[0] = buffer[1];
+ buffer[1] = buffer[2];
+ buffer[2] = buffer[3];
+ buffer[3] = 0;
+ len++;
+
+ intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
}
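A minimal standalone sketch (not the driver's code) of the shuffle intel_write_infoframe() performs above; the packed infoframe bytes are invented placeholders, and only the hole insertion and the resulting DW layout follow the hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buffer[36] = {0};
	/* pretend hdmi_infoframe_pack() wrote HB0..HB2 + DB0..DB3 at offset 1 */
	const uint8_t packed[] = { 0x82, 0x02, 0x0d,         /* HB0..HB2 */
				   0x10, 0x20, 0x30, 0x40 }; /* DB0..DB3 */
	int len = (int)sizeof(packed);

	memcpy(buffer + 1, packed, len);

	/* move the header bytes down one slot, leaving the ECC/DP hole at 3 */
	buffer[0] = buffer[1];
	buffer[1] = buffer[2];
	buffer[2] = buffer[3];
	buffer[3] = 0;
	len++;

	/* prints "DW0: 00 0d 02 82" and "DW1: 40 30 20 10" (MSB first) */
	for (int i = 0; i < len; i += 4)
		printf("DW%d: %02x %02x %02x %02x\n", i / 4,
		       buffer[i + 3], buffer[i + 2], buffer[i + 1], buffer[i]);
	return 0;
}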
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
+ union hdmi_infoframe frame;
+ int ret;
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
- avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
else
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
}
- avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
- intel_set_infoframe(encoder, &avi_if);
+ intel_write_infoframe(encoder, &frame);
}
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
- struct dip_infoframe spd_if;
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill SPD infoframe\n");
+ return;
+ }
+
+ frame.spd.sdi = HDMI_SPD_SDI_PC;
+
+ intel_write_infoframe(encoder, &frame);
+}
+
+static void
+intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
- memset(&spd_if, 0, sizeof(spd_if));
- spd_if.type = DIP_TYPE_SPD;
- spd_if.ver = DIP_VERSION_SPD;
- spd_if.len = DIP_LEN_SPD;
- strcpy(spd_if.body.spd.vn, "Intel");
- strcpy(spd_if.body.spd.pd, "Integrated gfx");
- spd_if.body.spd.sdi = DIP_SPD_PC;
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ adjusted_mode);
+ if (ret < 0)
+ return;
- intel_set_infoframe(encoder, &spd_if);
+ intel_write_infoframe(encoder, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
@@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
@@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -589,16 +638,16 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
-static void intel_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@@ -609,7 +658,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->config.pipe_bpp > 24)
+ if (crtc->config.pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
- hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -719,14 +768,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+}
- if (IS_VALLEYVIEW(dev)) {
- struct intel_digital_port *dport =
- enc_to_dig_port(&encoder->base);
- int channel = vlv_dport_to_channel(dport);
-
- vlv_wait_port_ready(dev_priv, channel);
- }
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
+{
}
static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -879,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@@ -1030,6 +1078,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
return;
/* Enable clock channels for this port */
+ mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
@@ -1060,6 +1109,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, port);
}
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1073,6 +1127,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1091,6 +1146,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1113,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .mode_set = intel_hdmi_mode_set,
-};
-
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
@@ -1221,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
struct intel_connector *intel_connector;
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1235,21 +1286,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
}
intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_encoder->compute_config = intel_hdmi_compute_config;
- intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997..d1c1e0f7f26 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i, reg_offset;
int ret = 0;
+ intel_aux_display_runtime_get(dev_priv);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
@@ -497,6 +498,7 @@ timeout:
out:
mutex_unlock(&dev_priv->gmbus_mutex);
+ intel_aux_display_runtime_put(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61348eae2f0..4d33278e31f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 temp;
+ if (HAS_PCH_SPLIT(dev)) {
+ assert_fdi_rx_pll_disabled(dev_priv, pipe);
+ assert_shared_dpll_disabled(dev_priv,
+ intel_crtc_to_shared_dpll(crtc));
+ } else {
+ assert_pll_disabled(dev_priv, pipe);
+ }
+
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (intel_crtc->config.dither &&
- intel_crtc->config.pipe_bpp == 18)
+ if (crtc->config.dither && crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -312,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
- * The LVDS pin pair will already have been turned on in the
- * intel_crtc_mode_set since it has a large impact on the DPLL
- * settings.
+ * We don't do anything here; the LVDS port is fully set up in the
+ * pre-enable hook - the ordering constraints for enabling the LVDS port
+ * vs. enabling the display PLL are too strict.
*/
}
@@ -336,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
@@ -497,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
return 0;
}
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .mode_set = intel_lvds_mode_set,
-};
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -959,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
+ intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
@@ -977,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = (1 << 1);
- drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c..ddfd0aefe0c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
- iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ reg_bo = NULL;
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
if (reg_bo == NULL)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->gtt_offset;
+ overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = overlay->reg_bo->gtt_offset;
+ error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
-#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5950888ae1d..a43c33bc4a3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- pipe_config->timings_set = true;
-
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b0e4a0bd131..46056820d1d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
+ if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
dev_priv->display.enable_fbc(work->crtc,
work->interval);
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
}
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
- if (dev_priv->fbc_work == NULL)
+ if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
+ kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
for our mutex), it will discover that it is no longer
* necessary to run.
*/
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
return;
}
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+ dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
@@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
}
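Note the return value: set_no_fbc_reason() reports whether the stored reason actually changed, and the callers below use it to emit each "disabling" debug message only once per transition instead of on every intel_update_fbc() pass.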
/**
@@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
- if (!i915_powersave)
+ if (!I915_HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
+ }
- if (!I915_HAS_FBC(dev))
+ if (!i915_powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
+ }
/*
* If FBC is already on, we just have to verify that we can
@@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev)
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
goto out_disable;
}
crtc = tmp_crtc;
@@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev)
}
if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
@@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- enable_fbc = 0;
+ if (i915_enable_fbc < 0 &&
+ INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
}
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ if (!i915_enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
goto out_disable;
}
@@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev)
}
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
goto out_disable;
}
@@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
@@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
}
@@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
@@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev)
}
intel_enable_fbc(crtc, 500);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
@@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev)
I915_WRITE(FW_BLC, fwater_lo);
}
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
/*
* Check the wm result.
*
@@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev)
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
+ dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
+ dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev)
if (g4x_compute_wm0(dev, PIPE_B,
&ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
+ dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
+ dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
@@ -1860,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev)
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
@@ -1962,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
static void ivybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
@@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &ignore_cursor_wm) ||
!ironlake_compute_srwm(dev, 3, enabled,
- 2 * SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.cur_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
@@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
-static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
- struct drm_crtc *crtc)
+static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
+ struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate, pfit_size;
@@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
return pixel_rate;
}
-static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint64_t ret;
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
return ret;
}
-static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint32_t ret;
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
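A standalone sketch of the two formulas above with invented inputs (pixel rate in kHz and latency in 0.1 us units, matching the driver's conventions; 148500 kHz with an htotal of 2200 corresponds to 1080p60):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t wm_method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
	uint64_t ret = (uint64_t)pixel_rate * bpp * latency;
	return DIV_ROUND_UP(ret, 64 * 10000) + 2;
}

static uint32_t wm_method2(uint32_t pixel_rate, uint32_t htotal,
			   uint32_t hpixels, uint8_t bpp, uint32_t latency)
{
	uint32_t lines = (latency * pixel_rate) / (htotal * 10000);
	return DIV_ROUND_UP((lines + 1) * hpixels * bpp, 64) + 2;
}

int main(void)
{
	/* e.g. 148500 kHz pixel clock, 4 bytes/pixel, latency 7 (= 0.7 us) */
	printf("method1 = %u\n", wm_method1(148500, 4, 7));          /* 9 */
	printf("method2 = %u\n", wm_method2(148500, 2200, 1920, 4, 7)); /* 122 */
	return 0;
}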
-static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
{
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
@@ -2143,15 +2162,11 @@ static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
struct hsw_pipe_wm_parameters {
bool active;
- bool sprite_enabled;
- uint8_t pri_bytes_per_pixel;
- uint8_t spr_bytes_per_pixel;
- uint8_t cur_bytes_per_pixel;
- uint32_t pri_horiz_pixels;
- uint32_t spr_horiz_pixels;
- uint32_t cur_horiz_pixels;
uint32_t pipe_htotal;
uint32_t pixel_rate;
+ struct intel_plane_wm_parameters pri;
+ struct intel_plane_wm_parameters spr;
+ struct intel_plane_wm_parameters cur;
};
struct hsw_wm_maximums {
@@ -2161,15 +2176,6 @@ struct hsw_wm_maximums {
uint16_t fbc;
};
-struct hsw_lp_wm_result {
- bool enable;
- bool fbc_enable;
- uint32_t pri_val;
- uint32_t spr_val;
- uint32_t cur_val;
- uint32_t fbc_val;
-};
-
struct hsw_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
@@ -2178,128 +2184,289 @@ struct hsw_wm_values {
bool enable_fbc_wm;
};
-enum hsw_data_buf_partitioning {
- HSW_DATA_BUF_PART_1_2,
- HSW_DATA_BUF_PART_5_6,
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+ bool fbc_wm_enabled;
};
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
uint32_t method1, method2;
- /* TODO: for now, assume the primary plane is always enabled. */
- if (!params->active)
+ if (!params->active || !params->pri.enabled)
return 0;
- method1 = hsw_wm_method1(params->pixel_rate,
- params->pri_bytes_per_pixel,
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->pri.bytes_per_pixel,
mem_value);
if (!is_lp)
return method1;
- method2 = hsw_wm_method2(params->pixel_rate,
+ method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->pri_horiz_pixels,
- params->pri_bytes_per_pixel,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
- if (!params->active || !params->sprite_enabled)
+ if (!params->active || !params->spr.enabled)
return 0;
- method1 = hsw_wm_method1(params->pixel_rate,
- params->spr_bytes_per_pixel,
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->spr.bytes_per_pixel,
mem_value);
- method2 = hsw_wm_method2(params->pixel_rate,
+ method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->spr_horiz_pixels,
- params->spr_bytes_per_pixel,
+ params->spr.horiz_pixels,
+ params->spr.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
- if (!params->active)
+ if (!params->active || !params->cur.enabled)
return 0;
- return hsw_wm_method2(params->pixel_rate,
+ return ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->cur_horiz_pixels,
- params->cur_bytes_per_pixel,
+ params->cur.horiz_pixels,
+ params->cur.bytes_per_pixel,
mem_value);
}
/* Only for WM_LP. */
-static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
- uint32_t pri_val,
- uint32_t mem_value)
+static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t pri_val)
{
- if (!params->active)
+ if (!params->active || !params->pri.enabled)
return 0;
- return hsw_wm_fbc(pri_val,
- params->pri_horiz_pixels,
- params->pri_bytes_per_pixel);
+ return ilk_wm_fbc(pri_val,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel);
}
-static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_lp_wm_result *result)
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
- enum pipe pipe;
- uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+ if (INTEL_INFO(dev)->gen >= 7)
+ return 768;
+ else
+ return 512;
+}
- for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
- struct hsw_pipe_wm_parameters *p = &params[pipe];
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev);
+ unsigned int max;
- pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
- spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
- cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
- fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
- }
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
- result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
- result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
- result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
- result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_INFO(dev)->num_pipes;
- if (result->fbc_val > max->fbc) {
- result->fbc_enable = false;
- result->fbc_val = 0;
- } else {
- result->fbc_enable = true;
+ /*
+ * For some reason the non-self-refresh
+ * FIFO size is only half of the
+ * self-refresh FIFO size on ILK/SNB.
+ */
+ if (INTEL_INFO(dev)->gen <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
}
+ /* clamp to max that the registers can hold */
+ if (INTEL_INFO(dev)->gen >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ max = level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ max = level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ max = level == 0 ? 63 : 255;
+
+ return min(fifo_size, max);
+}
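Plugging numbers into this helper shows it reproducing the hard-coded maxima it replaces below: on gen7 with a single active pipe, sprites enabled and level > 0, the FIFO stays at 768 entries, so a 1:1 split yields 768 / 2 = 384 for both planes, while the 5/6 split yields 768 * 5 / 6 = 640 for the sprite and 768 / 6 = 128 for the primary, matching the 384/640/128 constants removed from hsw_compute_wm_parameters() further down.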
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ if (INTEL_INFO(dev)->gen >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+/* Calculate the maximum FBC watermark */
+static unsigned int ilk_fbc_wm_max(void)
+{
+ /* max that registers can hold */
+ return 15;
+}
+
+static void ilk_wm_max(struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct hsw_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev, level, config);
+ max->fbc = ilk_fbc_wm_max();
+}
+
+static bool ilk_check_wm(int level,
+ const struct hsw_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
result->enable = result->pri_val <= max->pri &&
result->spr_val <= max->spr &&
result->cur_val <= max->cur;
- return result->enable;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+ result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+ result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+ int level,
+ struct hsw_pipe_wm_parameters *p,
+ struct intel_wm_level *result)
+{
+ uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+ uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+ uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+ /* WM1+ latency values stored in 0.5us units */
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+ result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+ result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+ result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+ result->enable = true;
+}
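As a quick unit check: a raw WM1 latency of 4 is stored in 0.5 us units, so the *5 above hands 20 to the compute functions, which work in the 0.1 us units noted on ilk_wm_method1()/ilk_wm_method2(), i.e. 2 us.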
+
+static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
+ int level, struct hsw_wm_maximums *max,
+ struct hsw_pipe_wm_parameters *params,
+ struct intel_wm_level *result)
+{
+ enum pipe pipe;
+ struct intel_wm_level res[3];
+
+ for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
+ ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
+
+ result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
+ result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
+ result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
+ result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
+ result->enable = true;
+
+ return ilk_check_wm(level, max, result);
}
static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
- uint32_t mem_value, enum pipe pipe,
+ enum pipe pipe,
struct hsw_pipe_wm_parameters *params)
{
uint32_t pri_val, cur_val, spr_val;
+ /* WM0 latency values stored in 0.1us units */
+ uint16_t pri_latency = dev_priv->wm.pri_latency[0];
+ uint16_t spr_latency = dev_priv->wm.spr_latency[0];
+ uint16_t cur_latency = dev_priv->wm.cur_latency[0];
- pri_val = hsw_compute_pri_wm(params, mem_value, false);
- spr_val = hsw_compute_spr_wm(params, mem_value);
- cur_val = hsw_compute_cur_wm(params, mem_value);
+ pri_val = ilk_compute_pri_wm(params, pri_latency, false);
+ spr_val = ilk_compute_spr_wm(params, spr_latency);
+ cur_val = ilk_compute_cur_wm(params, cur_latency);
WARN(pri_val > 127,
"Primary WM error, mode not supported for pipe %c\n",
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
PIPE_WM_LINETIME_TIME(linetime);
}
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev)) {
+ uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+ wm[0] = (sskpd >> 56) & 0xFF;
+ if (wm[0] == 0)
+ wm[0] = sskpd & 0xF;
+ wm[1] = (sskpd >> 4) & 0xFF;
+ wm[2] = (sskpd >> 12) & 0xFF;
+ wm[3] = (sskpd >> 20) & 0x1FF;
+ wm[4] = (sskpd >> 32) & 0x1FF;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+ wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+ wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+ wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+ wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ uint32_t mltr = I915_READ(MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+ wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ }
+}
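A standalone sketch of the Haswell branch of the decode above; the field offsets come from the hunk, while the SSKPD value itself is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sskpd = 0x0000008004024125ULL; /* invented register value */
	uint16_t wm[5];

	wm[0] = (sskpd >> 56) & 0xFF;
	if (wm[0] == 0)		/* fall back to the low nibble, as above */
		wm[0] = sskpd & 0xF;
	wm[1] = (sskpd >> 4) & 0xFF;
	wm[2] = (sskpd >> 12) & 0xFF;
	wm[3] = (sskpd >> 20) & 0x1FF;
	wm[4] = (sskpd >> 32) & 0x1FF;

	/* prints 5, 18, 36, 64, 128 for this value */
	for (int i = 0; i < 5; i++)
		printf("wm[%d] = %u\n", i, wm[i]);
	return 0;
}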
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+
+ /* WaDoubleCursorLP3Latency:ivb */
+ if (IS_IVYBRIDGE(dev))
+ wm[3] *= 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+ const char *name,
+ const uint16_t wm[5])
+{
+ int level, max_level;
+
+ /* how many WM levels are we expecting */
+ if (IS_HASWELL(dev))
+ max_level = 4;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ max_level = 3;
+ else
+ max_level = 2;
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ DRM_ERROR("%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+ name, level, wm[level],
+ latency / 10, latency % 10);
+ }
+}
+
+static void intel_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+ memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
static void hsw_compute_wm_parameters(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
- uint32_t *wm,
struct hsw_wm_maximums *lp_max_1_2,
struct hsw_wm_maximums *lp_max_5_6)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_plane *plane;
- uint64_t sskpd = I915_READ64(MCH_SSKPD);
enum pipe pipe;
- int pipes_active = 0, sprites_enabled = 0;
-
- if ((sskpd >> 56) & 0xFF)
- wm[0] = (sskpd >> 56) & 0xFF;
- else
- wm[0] = sskpd & 0xF;
- wm[1] = ((sskpd >> 4) & 0xFF) * 5;
- wm[2] = ((sskpd >> 12) & 0xFF) * 5;
- wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
- wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+ struct intel_wm_config config = {};
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
if (!p->active)
continue;
- pipes_active++;
+ config.num_pipes_active++;
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
- p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
- p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
- p->cur_bytes_per_pixel = 4;
- p->pri_horiz_pixels =
+ p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+ p->cur.bytes_per_pixel = 4;
+ p->pri.horiz_pixels =
intel_crtc->config.requested_mode.hdisplay;
- p->cur_horiz_pixels = 64;
+ p->cur.horiz_pixels = 64;
+ /* TODO: for now, assume primary and cursor planes are always enabled. */
+ p->pri.enabled = true;
+ p->cur.enabled = true;
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
pipe = intel_plane->pipe;
p = &params[pipe];
- p->sprite_enabled = intel_plane->wm.enable;
- p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
- p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+ p->spr = intel_plane->wm;
- if (p->sprite_enabled)
- sprites_enabled++;
+ config.sprites_enabled |= p->spr.enabled;
+ config.sprites_scaled |= p->spr.scaled;
}
- if (pipes_active > 1) {
- lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
- lp_max_1_2->spr = lp_max_5_6->spr = 128;
- lp_max_1_2->cur = lp_max_5_6->cur = 64;
- } else {
- lp_max_1_2->pri = sprites_enabled ? 384 : 768;
- lp_max_5_6->pri = sprites_enabled ? 128 : 768;
- lp_max_1_2->spr = 384;
- lp_max_5_6->spr = 640;
- lp_max_1_2->cur = lp_max_5_6->cur = 255;
- }
- lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+ ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
+ ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
+ else
+ *lp_max_5_6 = *lp_max_1_2;
}
static void hsw_compute_wm_results(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
- uint32_t *wm,
struct hsw_wm_maximums *lp_maximums,
struct hsw_wm_values *results)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct hsw_lp_wm_result lp_results[4] = {};
+ struct intel_wm_level lp_results[4] = {};
enum pipe pipe;
int level, max_level, wm_lp;
for (level = 1; level <= 4; level++)
- if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+ if (!hsw_compute_lp_wm(dev_priv, level,
+ lp_maximums, params,
&lp_results[level - 1]))
break;
max_level = level - 1;
+ memset(results, 0, sizeof(*results));
+
/* The spec says it is preferred to disable FBC WMs instead of disabling
* a WM level. */
results->enable_fbc_wm = true;
for (level = 1; level <= max_level; level++) {
- if (!lp_results[level - 1].fbc_enable) {
+ if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
results->enable_fbc_wm = false;
- break;
+ lp_results[level - 1].fbc_val = 0;
}
}
- memset(results, 0, sizeof(*results));
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- const struct hsw_lp_wm_result *r;
+ const struct intel_wm_level *r;
level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
}
for_each_pipe(pipe)
- results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
- pipe,
+ results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
&params[pipe]);
for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+ struct hsw_wm_values *r2)
{
int i, val_r1 = 0, val_r2 = 0;
@@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
struct hsw_wm_values *results,
- enum hsw_data_buf_partitioning partitioning)
+ enum intel_ddb_partitioning partitioning)
{
struct hsw_wm_values previous;
uint32_t val;
- enum hsw_data_buf_partitioning prev_partitioning;
+ enum intel_ddb_partitioning prev_partitioning;
bool prev_enable_fbc_wm;
previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
@@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
if (prev_partitioning != partitioning) {
val = I915_READ(WM_MISC);
- if (partitioning == HSW_DATA_BUF_PART_1_2)
+ if (partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
@@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev)
struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
struct hsw_pipe_wm_parameters params[3];
struct hsw_wm_values results_1_2, results_5_6, *best_results;
- uint32_t wm[5];
- enum hsw_data_buf_partitioning partitioning;
+ enum intel_ddb_partitioning partitioning;
- hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+ hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
- hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+ hsw_compute_wm_results(dev, params,
+ &lp_max_1_2, &results_1_2);
if (lp_max_1_2.pri != lp_max_5_6.pri) {
- hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
- &results_5_6);
+ hsw_compute_wm_results(dev, params,
+ &lp_max_5_6, &results_5_6);
best_results = hsw_find_best_result(&results_1_2, &results_5_6);
} else {
best_results = &results_1_2;
}
partitioning = (best_results == &results_1_2) ?
- HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
hsw_write_wm_values(dev_priv, best_results, partitioning);
}
-static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+static void haswell_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
- struct drm_plane *plane;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct intel_plane *intel_plane = to_intel_plane(plane);
-
- if (intel_plane->pipe == pipe) {
- intel_plane->wm.enable = enable;
- intel_plane->wm.horiz_pixels = sprite_width + 1;
- intel_plane->wm.bytes_per_pixel = pixel_size;
- break;
- }
- }
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(dev);
+ haswell_update_wm(plane->dev);
}
static bool
@@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
return *sprite_wm > 0x3ff ? false : true;
}
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+static void sandybridge_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int pipe = to_intel_plane(plane)->pipe;
+ int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
u32 val;
int sprite_wm, reg;
int ret;
- if (!enable)
+ if (!enabled)
return;
switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.spr_latency[1] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.spr_latency[2] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.spr_latency[3] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size, enable);
+ dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ pixel_size, enabled, scaled);
}
static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 pval;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- do {
- pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
- break;
- }
- udelay(10);
- } while (pval & 1);
+ if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+ DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
pval >>= 8;
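
[annotation] The hunk above replaces an open-coded jiffies poll with the driver's wait_for(COND, timeout_ms) helper, which polls until COND is true or the budget expires and returns nonzero on timeout. A minimal sketch of the idiom (REG and BUSY_BIT are placeholders, not real registers):

    /* poll REG for up to 10 ms; a nonzero return means we timed out */
    if (wait_for((I915_READ(REG) & BUSY_BIT) == 0, 10))
        DRM_DEBUG_DRIVER("timed out waiting for REG to go idle\n");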
@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
}
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
/* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- spin_lock_irq(&dev_priv->rps.lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ gen6_disable_rps_interrupts(dev);
+}
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen6_disable_rps_interrupts(dev);
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
int intel_enable_rc6(const struct drm_device *dev)
{
+ /* No RC6 before Ironlake */
+ if (INTEL_INFO(dev)->gen < 5)
+ return 0;
+
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
@@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev)
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ /* only unmask PM interrupts we need. Mask all others. */
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+}
+
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
@@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- /* FIXME: Our interrupt enabling sequence is bonghits.
- * dev_priv->rps.pm_iir really should be 0 here. */
- dev_priv->rps.pm_iir = 0;
- I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* unmask all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3356,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_put(dev_priv);
}
-static void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
@@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
- -1,
+ I915_GTT_OFFSET_NONE,
pctx_size);
goto out;
}
@@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- WARN_ON(dev_priv->rps.pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv);
}
@@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
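
[annotation] These RC6 hunks are part of a wider conversion in this series: direct obj->gtt_offset reads become i915_gem_obj_ggtt_offset(obj), and pinning goes through i915_gem_obj_ggtt_pin(). A minimal sketch of the pairing, with the same argument order the hunks use (alignment, map_and_fenceable, nonblocking):

    int ret;
    u32 gfx_addr = 0;

    ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
    if (ret == 0)
        gfx_addr = i915_gem_obj_ggtt_offset(obj);  /* GGTT address of the pinned object */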
@@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- /* Required for FBC */
+ /*
+ * Required for FBC
+ * WaFbcDisableDpfcClockGating:ilk
+ */
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4713,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
@@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
+ *
+ * WaFbcAsynchFlipDisableFbcQueue:snb
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- /* WaMbcDriverBootEnable:snb */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
g4x_disable_trickle_feed(dev);
/* The default value should be 0x200 according to docs, but the two
@@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
- /* WaMbcDriverBootEnable:hsw */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* WaMbcDriverBootEnable:ivb */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
/* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- /* WaMbcDriverBootEnable:vlv */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
-
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
@@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
case POWER_DOMAIN_TRANSCODER_B:
case POWER_DOMAIN_TRANSCODER_C:
return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
default:
BUG();
}
@@ -5048,17 +5281,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
uint32_t tmp;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
if (enable) {
if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE), 20))
+ HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
} else {
@@ -5178,10 +5412,21 @@ void intel_init_power_well(struct drm_device *dev)
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
+/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ hsw_disable_package_c8(dev_priv);
+}
+
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ hsw_enable_package_c8(dev_priv);
+}
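
[annotation] The two helpers above give GMBUS/DP AUX code a simple bracket for keeping those interrupts usable while package C8 is otherwise permitted. A hypothetical call-site sketch (the transfer in the middle is illustrative only):

    intel_aux_display_runtime_get(dev_priv);   /* blocks PC8 entry */
    /* ... GMBUS or DP AUX transfers that rely on those interrupts ... */
    intel_aux_display_runtime_put(dev_priv);   /* allow PC8 again */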
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
@@ -5217,8 +5462,12 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ intel_setup_wm_latency(dev);
+
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ if (dev_priv->wm.pri_latency[1] &&
+ dev_priv->wm.spr_latency[1] &&
+ dev_priv->wm.cur_latency[1])
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
@@ -5227,7 +5476,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
@@ -5237,7 +5488,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
@@ -5247,7 +5500,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else if (IS_HASWELL(dev)) {
- if (I915_READ64(MCH_SSKPD)) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = haswell_update_wm;
dev_priv->display.update_sprite_wm =
haswell_update_sprite_wm;
@@ -5310,260 +5565,6 @@ void intel_init_pm(struct drm_device *dev)
}
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
-{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
-}
-
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-}
-
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- u32 forcewake_ack;
-
- if (IS_HASWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:ivb,hsw */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->gt.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->gt.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
- }
- dev_priv->gt_fifo_count--;
-
- return ret;
-}
-
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- POSTING_READ(FORCEWAKE_ACK_VLV);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
-
- /* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void intel_gt_sanitize(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
-
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- if (INTEL_INFO(dev)->gen >= 6)
- intel_disable_gt_powersave(dev);
-}
-
-void intel_gt_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->gt.force_wake_get = vlv_force_wake_get;
- dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_HASWELL(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* IVB configs may use multi-threaded forcewake */
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- } else {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- } else if (IS_GEN6(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
-}
-
-void intel_pm_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
- intel_gen6_powersave_work);
-}
-
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5666,3 +5667,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
+
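
[annotation] The forcewake machinery deleted above reappears in the new intel_uncore.c later in this diff; its get/put contract is unchanged. Per the comment that moves with the code, a sketch of the explicit pairing for a register sequence that must not race GT power-down:

    gen6_gt_force_wake_get(dev_priv);
    /* ... I915_READ()/I915_WRITE() sequence that needs the GT awake ... */
    gen6_gt_force_wake_put(dev_priv);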
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 079ef0129e7..f05cceac5a5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == obj->gtt_offset &&
+ I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = obj->gtt_offset;
+ pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) {
ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
- dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
- }
+ if (ring->irq_refcount++ == 0)
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
- dev_priv->gt_irq_mask |= ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
- }
+ if (--ring->irq_refcount == 0)
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -1033,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
gen6_gt_force_wake_get(dev_priv);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1057,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
else
I915_WRITE_IMR(ring, ~0);
- dev_priv->gt_irq_mask |= ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1082,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (ring->irq_refcount.pm++ == 0) {
- u32 pm_imr = I915_READ(GEN6_PMIMR);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
- POSTING_READ(GEN6_PMIMR);
+ snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -1104,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (--ring->irq_refcount.pm == 0) {
- u32 pm_imr = I915_READ(GEN6_PMIMR);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
- POSTING_READ(GEN6_PMIMR);
+ snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static int
@@ -1156,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = obj->gtt_offset;
+ u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1236,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1319,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1328,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1606,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+ if (HAS_VEBOX(ring->dev))
+ I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
ring->set_seqno(ring, seqno);
@@ -1840,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
@@ -2020,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
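
[annotation] The ring IRQ hunks above collapse the split gt/pm refcounts into a single irq_refcount guarded by dev_priv->irq_lock. A sketch of the resulting get-side pattern, using the ilk_enable_gt_irq() helper the hunks introduce:

    spin_lock_irqsave(&dev_priv->irq_lock, flags);
    if (ring->irq_refcount++ == 0)   /* first user unmasks the interrupt */
        ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
    spin_unlock_irqrestore(&dev_priv->irq_lock, flags);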
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da4..432ad5311ba 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,11 +33,12 @@ struct intel_hw_status_page {
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
-#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
-
-enum intel_ring_hangcheck_action { wait, active, kick, hung };
+enum intel_ring_hangcheck_action {
+ HANGCHECK_WAIT,
+ HANGCHECK_ACTIVE,
+ HANGCHECK_KICK,
+ HANGCHECK_HUNG,
+};
struct intel_ring_hangcheck {
bool deadlock;
@@ -78,10 +79,7 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- struct {
- u32 gt; /* protected by dev_priv->irq_lock */
- u32 pm; /* protected by dev_priv->rps.lock (sucks) */
- } irq_refcount;
+ unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
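
[annotation] The hangcheck enum above swaps bare lowercase identifiers for namespaced HANGCHECK_* constants. A hypothetical consumer sketch (the per-arm handling is illustrative only, and an action field of this enum type is assumed on the hangcheck struct):

    switch (ring->hangcheck.action) {
    case HANGCHECK_WAIT:    /* ring is legitimately waiting */
    case HANGCHECK_ACTIVE:  /* still making progress */
        break;
    case HANGCHECK_KICK:    /* try kicking the ring before declaring a hang */
        break;
    case HANGCHECK_HUNG:    /* fall through to reset handling */
        break;
    }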
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d562244..317e058fb3c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_sdvo, base.base);
+ return container_of(encoder, struct intel_sdvo, base);
}
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_sdvo, base);
+ return to_sdvo(intel_attached_encoder(connector));
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
@@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+ uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ union hdmi_infoframe frame;
+ int ret;
+ ssize_t len;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return false;
+ }
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
else
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
}
- avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
- intel_dip_infoframe_csum(&avi_if);
-
- /* sdvo spec says that the ecc is handled by the hw, and it looks like
- * we must not send the ecc field, either. */
- memcpy(sdvo_data, &avi_if, 3);
- sdvo_data[3] = avi_if.checksum;
- memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+ len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+ if (len < 0)
+ return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
@@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
@@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
@@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
@@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
@@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
/* Cross check the port pixel multiplier with the sdvo encoder state. */
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
- switch (val) {
- case SDVO_CLOCK_RATE_MULT_1X:
- encoder_pixel_multiplier = 1;
- break;
- case SDVO_CLOCK_RATE_MULT_2X:
- encoder_pixel_multiplier = 2;
- break;
- case SDVO_CLOCK_RATE_MULT_4X:
- encoder_pixel_multiplier = 4;
- break;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ &val, 1)) {
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
+ }
}
- if(HAS_PCH_SPLIT(dev))
- return; /* no pixel multiplier readout support yet */
-
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
@@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
@@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
@@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
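
[annotation] The SDVO AVI path above drops the hand-rolled dip_infoframe in favour of the shared hdmi_infoframe helpers. A minimal sketch of that flow in isolation, using only the calls visible in the hunk:

    union hdmi_infoframe frame;
    uint8_t buf[HDMI_INFOFRAME_SIZE(AVI)];
    ssize_t len;

    if (drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, adjusted_mode) < 0)
        return false;                      /* mode couldn't be described */
    len = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
    if (len < 0)
        return false;                      /* packing/checksumming failed */
    /* buf now holds the packed header plus checksummed payload */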
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a457..78b621cdd10 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -38,7 +38,8 @@
#include "i915_drv.h"
static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
sprctl |= SP_ENABLE;
+ intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
static void
-vlv_disable_plane(struct drm_plane *dplane)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
+
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
static int
@@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
}
static void
-ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
@@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
}
static void
-ivb_disable_plane(struct drm_plane *plane)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
- intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-ilk_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
+
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
static void
@@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_enable_primary(crtc);
if (visible)
- intel_plane->update_plane(plane, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
- intel_plane->disable_plane(plane);
+ intel_plane->disable_plane(plane, crtc);
if (disable_primary)
intel_disable_primary(crtc);
@@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
- if (plane->crtc)
- intel_enable_primary(plane->crtc);
- intel_plane->disable_plane(plane);
+ if (!plane->fb)
+ return 0;
+
+ if (WARN_ON(!plane->crtc))
+ return -EINVAL;
+
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane, plane->crtc);
if (!intel_plane->obj)
goto out;
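
[annotation] With the plane/crtc-based hook above, sprite callers now pass the source size plus an explicit scaled flag rather than a bare pipe. A sketch of the new call, mirroring the update paths earlier in this file:

    /* src_w/src_h and crtc_w/crtc_h as in the update_plane hooks above */
    intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
                                   src_w != crtc_w || src_h != crtc_h);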
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 39debd80d19..f2c6d7909ae 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
},
};
-static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_tv, base.base);
+ return container_of(encoder, struct intel_tv, base);
}
static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_tv,
- base);
+ return enc_to_tv(intel_attached_encoder(connector));
}
static bool
@@ -908,7 +906,7 @@ static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
- struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (!tv_mode)
@@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
return true;
}
-static void
-intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_tv_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
@@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
mode = reported_modes[0];
if (force) {
@@ -1483,10 +1482,6 @@ out:
return ret;
}
-static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .mode_set = intel_tv_mode_set,
-};
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_tv_detect,
@@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
@@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 00000000000..8f5bc869c02
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+
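[annotation] The __raw_* accessors above are thin readb/readw/readl/readq (and write) wrappers over the mapped register space; unlike the tracked I915_READ()/I915_WRITE() macros they do no forcewake bookkeeping, which is why the forcewake handshake below can use them on itself. One-line usage sketch:

    u32 ack = __raw_i915_read32(dev_priv, FORCEWAKE_ACK);  /* raw MMIO read, no tracking */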
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE, 1);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:snb */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
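/*
 * Illustrative sketch (not part of this patch): FORCEWAKE_MT and friends
 * are "masked" registers -- the top 16 bits of a write select which of
 * the low 16 bits actually change, which is why the reset above writes
 * _MASKED_BIT_DISABLE(0xffff) rather than plain 0. A minimal sketch of
 * that encoding, mirroring the i915 helper macros (EXAMPLE_ names are
 * invented for illustration):
 */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a */
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */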
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:ivb,hsw */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+
+ gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->uncore.fifo_count = fifo;
+ }
+ dev_priv->uncore.fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+ /* WaRsForcewakeWaitTC0:vlv */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+	 * ECOBUS read will return zero, which will be
+	 * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_disable_gt_powersave(dev);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
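/*
 * Illustrative sketch of the usage contract described above: bracket a
 * multi-register sequence with the get/put pair so the GT cannot power
 * down in between. SOME_GT_REG/OTHER_GT_REG are placeholders, not real
 * registers, and I915_WRITE() stands for the driver's write wrapper.
 */
static void example_gt_sequence(struct drm_i915_private *dev_priv)
{
	gen6_gt_force_wake_get(dev_priv);
	I915_WRITE(SOME_GT_REG, 1);
	I915_WRITE(OTHER_GT_REG, 2);	/* GT stays awake in between */
	gen6_gt_force_wake_put(dev_priv);
}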
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk
+	 * Issue a dummy write to wake up the chip from rc6 before touching
+	 * it for real. MI_MODE is masked, hence harmless to write 0 into. */
+ __raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+ reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed write to %x\n", reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+#define __i915_read(x) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val; \
+}
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
+#define __i915_write(x) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
+ unsigned long irqflags; \
+ u32 __fifo_ret = 0; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ hsw_unclaimed_reg_clear(dev_priv, reg); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
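/*
 * The two generator macros above stamp out one read and one write accessor
 * per register width; after preprocessing, the 32-bit pair has this shape
 * (signatures only, bodies as generated above). Callers normally reach
 * them through I915_READ()/I915_WRITE()-style wrappers that supply the
 * ambient dev_priv and the trace flag.
 *
 *	u32  i915_read32(struct drm_i915_private *dev_priv, u32 reg,
 *			 bool trace);
 *	void i915_write32(struct drm_i915_private *dev_priv, u32 reg,
 *			  u32 val, bool trace);
 */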
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+	uint32_t gen_bitmask; /* supported gens: 0x10 for gen4, 0x30 for gens 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
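/*
 * Illustrative userspace sketch (not part of this file): with the
 * whitelist above, only the render ring's RING_TIMESTAMP is readable,
 * on gens 4-7 (the 0xF0 bitmask). Assumes the drm_i915_reg_read struct
 * and ioctl number from the i915 uapi header; 0x2358 is assumed to be
 * RING_TIMESTAMP(RENDER_RING_BASE), i.e. 0x2000 + 0x358.
 */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u64 read_render_timestamp(int drm_fd)
{
	struct drm_i915_reg_read req = { .offset = 0x2358 };

	if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &req))
		return 0;	/* -EINVAL if the register isn't whitelisted */
	return req.val;
}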
+static int i8xx_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+ int ret;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long irqflags;
+
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+	/* If the reset happened while a forcewake reference was held,
+	 * re-assert it; otherwise release it.
+	 */
+ if (dev_priv->uncore.forcewake_count)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ else
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: return gen6_do_reset(dev);
+ case 5: return ironlake_do_reset(dev);
+ case 4: return i965_do_reset(dev);
+ case 2: return i8xx_do_reset(dev);
+ default: return -ENODEV;
+ }
+}
+
+void intel_uncore_clear_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* XXX needs spinlock around caller's grouping */
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed register before interrupt\n");
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 17d0a637e4f..6b1a87c8aac 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = mga_compat_ioctl,
#endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969..ca4bc54ea21 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
unsigned int agp_size;
} drm_mga_private_t;
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0..37cc2fb4ead 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
return 0;
}
-struct drm_ioctl_desc mga_ioctls[] = {
+const struct drm_ioctl_desc mga_ioctls[] = {
DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7..fcce7b2f801 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = mgag200_driver_load,
.unload = mgag200_driver_unload,
.fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
.gem_free_object = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
- .dumb_destroy = mgag200_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d935..baaae19332e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
void mgag200_gem_free_object(struct drm_gem_object *obj);
int
mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee..0f8b861b10b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int mgag200_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d70e4a92773..07b192fe15c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
}
static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- mgabo->gem.driver_private = NULL;
mgabo->bo.bdev = &mdev->ttm.bdev;
mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 00000000000..a06c19cc56f
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
+
+config DRM_MSM
+ tristate "MSM DRM"
+ depends on DRM
+ depends on ARCH_MSM
+ depends on ARCH_MSM8960
+ select DRM_KMS_HELPER
+ select SHMEM
+ select TMPFS
+ default y
+ help
+ DRM/KMS driver for MSM/snapdragon.
+
+config DRM_MSM_FBDEV
+ bool "Enable legacy fbdev support for MSM modesetting driver"
+ depends on DRM_MSM
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default y
+ help
+	  Choose this option if you need legacy fbdev support. Note that
+	  this option also provides Linux console support on top of the
+	  MSM modesetting driver.
+
+config DRM_MSM_REGISTER_LOGGING
+ bool "MSM DRM register logging"
+ depends on DRM_MSM
+ default n
+ help
+	  Compile in support for logging register reads/writes in a format
+	  that can be parsed by the envytools demsm tool. If enabled, register
+	  logging can be switched on via the msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 00000000000..e17914889e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ ccflags-y += -Werror
+endif
+
+msm-y := \
+ adreno/adreno_gpu.o \
+ adreno/a3xx_gpu.o \
+ hdmi/hdmi.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_connector.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8x60.o \
+ mdp4/mdp4_crtc.o \
+ mdp4/mdp4_dtv_encoder.o \
+ mdp4/mdp4_format.o \
+ mdp4/mdp4_irq.o \
+ mdp4/mdp4_kms.o \
+ mdp4/mdp4_plane.o \
+ msm_drv.o \
+ msm_fb.o \
+ msm_gem.o \
+ msm_gem_submit.o \
+ msm_gpu.o \
+ msm_ringbuffer.o
+
+msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
+
+obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 00000000000..e036f6c1db9
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
+NOTES about msm drm/kms driver:
+
+In the current snapdragon SoCs, we have (at least) 3 different
+display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDSS - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+maps to which part #)
+
+Plus a handful of blocks around them for HDMI/DSI/etc output.
+
+And on the gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either a2xx or a3xx 3d core.
+
+But, HDMI/DSI/etc blocks seem like they can be shared across multiple
+display controller blocks. And I for sure don't want to have to deal
+with N different kms devices from xf86-video-freedreno. Plus, it
+seems like we can do some clever tricks, like using the GPU to trigger
+a pageflip after rendering completes (ie. have the kms/crtc code build
+up a gpu cmdstream to update scanout and write the FLUSH register after).
+
+So, the approach is one drm driver, with some modularity. Different
+'struct msm_kms' implementations, depending on display controller.
+And one or more 'struct msm_gpu' for the various different gpu sub-
+modules.
+
+(The second part is not implemented yet. So far this is just a basic
+KMS driver, not exposing any custom ioctls to userspace for now.)
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is:
+
+ plane -> PIPE{RGBn,VGn} \
+ crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
+ encoder -> DTV/LCDC/DSI (within MDP4) /
+ connector -> HDMI/DSI/etc --> other device(s)
+
+Since the irqs that drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq, even though the connectors
+may have their own irqs which they install themselves. For this reason
+the display controller is the "master" device.
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc. Ideally we would
+have a better way than just stashing the platform device in a global
+(ie. like a DT super-node... but I don't have any snapdragon hw yet that
+is using DT).
+
+Note that so far I've not been able to get any docs on the hw, and it
+seems that access to such docs would prevent me from working on the
+freedreno gallium driver. So there may be some mistakes in register
+names (I had to invent a few, since no sufficient hint was given in
+the downstream android fbdev driver), bitfield sizes, etc. My current
+state of understanding the registers is given in the envytools rnndb
+files at:
+
+ https://github.com/freedreno/envytools/tree/master/rnndb
+ (the mdp4/hdmi/dsi directories)
+
+These files are used both for a parser tool (in the same tree) to
+parse logged register reads/writes (both from downstream android fbdev
+driver, and this driver with register logging enabled), as well as to
+generate the register level headers.
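
To make the "one drm driver, with some modularity" idea concrete, here
is a hedged sketch of what a function-pointer based kms abstraction
could look like (names invented for illustration; the real 'struct
msm_kms' is defined in the driver sources):

  /* Illustrative only -- invented names, not the driver's actual API. */
  struct example_msm_kms;

  struct example_msm_kms_funcs {
  	int (*hw_init)(struct example_msm_kms *kms);
  	void (*irq_preinstall)(struct example_msm_kms *kms);
  	int (*enable_vblank)(struct example_msm_kms *kms,
  			     struct drm_crtc *crtc);
  	void (*destroy)(struct example_msm_kms *kms);
  };

  struct example_msm_kms {
  	const struct example_msm_kms_funcs *funcs;
  	/* display-controller specific state (MDP4, MDSS, ...) follows */
  };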
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 00000000000..35463864b95
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a2xx_rb_dither_type {
+ DITHER_PIXEL = 0,
+ DITHER_SUBPIXEL = 1,
+};
+
+enum a2xx_colorformatx {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum a2xx_sq_surfaceformat {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_10_11_11 = 16,
+ FMT_11_11_10 = 17,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_24_8 = 22,
+ FMT_24_8_FLOAT = 23,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_32_AS_8 = 39,
+ FMT_32_AS_8_8 = 40,
+ FMT_16_MPEG = 41,
+ FMT_16_16_MPEG = 42,
+ FMT_8_INTERLACED = 43,
+ FMT_32_AS_8_INTERLACED = 44,
+ FMT_32_AS_8_8_INTERLACED = 45,
+ FMT_16_INTERLACED = 46,
+ FMT_16_MPEG_INTERLACED = 47,
+ FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_DXN = 49,
+ FMT_8_8_8_8_AS_16_16_16_16 = 50,
+ FMT_DXT1_AS_16_16_16_16 = 51,
+ FMT_DXT2_3_AS_16_16_16_16 = 52,
+ FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_11_11_AS_16_16_16_16 = 55,
+ FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+ FMT_DXT3A_AS_1_1_1_1 = 61,
+};
+
+enum a2xx_sq_ps_vtx_mode {
+ POSITION_1_VECTOR = 0,
+ POSITION_2_VECTORS_UNUSED = 1,
+ POSITION_2_VECTORS_SPRITE = 2,
+ POSITION_2_VECTORS_EDGE = 3,
+ POSITION_2_VECTORS_KILL = 4,
+ POSITION_2_VECTORS_SPRITE_KILL = 5,
+ POSITION_2_VECTORS_EDGE_KILL = 6,
+ MULTIPASS = 7,
+};
+
+enum a2xx_sq_sample_cntl {
+ CENTROIDS_ONLY = 0,
+ CENTERS_ONLY = 1,
+ CENTROIDS_AND_CENTERS = 2,
+};
+
+enum a2xx_dx_clip_space {
+ DXCLIP_OPENGL = 0,
+ DXCLIP_DIRECTX = 1,
+};
+
+enum a2xx_pa_su_sc_polymode {
+ POLY_DISABLED = 0,
+ POLY_DUALMODE = 1,
+};
+
+enum a2xx_rb_edram_mode {
+ EDRAM_NOP = 0,
+ COLOR_DEPTH = 4,
+ DEPTH_ONLY = 5,
+ EDRAM_COPY = 6,
+};
+
+enum a2xx_pa_sc_pattern_bit_order {
+ LITTLE = 0,
+ BIG = 1,
+};
+
+enum a2xx_pa_sc_auto_reset_cntl {
+ NEVER = 0,
+ EACH_PRIMITIVE = 1,
+ EACH_PACKET = 2,
+};
+
+enum a2xx_pa_pixcenter {
+ PIXCENTER_D3D = 0,
+ PIXCENTER_OGL = 1,
+};
+
+enum a2xx_pa_roundmode {
+ TRUNCATE = 0,
+ ROUND = 1,
+ ROUNDTOEVEN = 2,
+ ROUNDTOODD = 3,
+};
+
+enum a2xx_pa_quantmode {
+ ONE_SIXTEENTH = 0,
+ ONE_EIGTH = 1,
+ ONE_QUARTER = 2,
+ ONE_HALF = 3,
+ ONE = 4,
+};
+
+enum a2xx_rb_copy_sample_select {
+ SAMPLE_0 = 0,
+ SAMPLE_1 = 1,
+ SAMPLE_2 = 2,
+ SAMPLE_3 = 3,
+ SAMPLE_01 = 4,
+ SAMPLE_23 = 5,
+ SAMPLE_0123 = 6,
+};
+
+enum sq_tex_clamp {
+ SQ_TEX_WRAP = 0,
+ SQ_TEX_MIRROR = 1,
+ SQ_TEX_CLAMP_LAST_TEXEL = 2,
+ SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
+ SQ_TEX_CLAMP_HALF_BORDER = 4,
+ SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
+ SQ_TEX_CLAMP_BORDER = 6,
+ SQ_TEX_MIRROR_ONCE_BORDER = 7,
+};
+
+enum sq_tex_swiz {
+ SQ_TEX_X = 0,
+ SQ_TEX_Y = 1,
+ SQ_TEX_Z = 2,
+ SQ_TEX_W = 3,
+ SQ_TEX_ZERO = 4,
+ SQ_TEX_ONE = 5,
+};
+
+enum sq_tex_filter {
+ SQ_TEX_FILTER_POINT = 0,
+ SQ_TEX_FILTER_BILINEAR = 1,
+ SQ_TEX_FILTER_BICUBIC = 2,
+};
+
+#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
+
+#define REG_A2XX_RBBM_CNTL 0x0000003b
+
+#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
+
+#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
+
+#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
+
+#define REG_A2XX_RBBM_DEBUG 0x0000039b
+
+#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
+
+#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
+
+#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
+
+#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
+
+#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+
+#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
+
+#define REG_A2XX_RBBM_INT_ACK 0x000003b6
+
+#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+
+#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
+
+#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
+
+#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
+
+#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
+
+#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
+
+#define REG_A2XX_CP_ST_BASE 0x0000044d
+
+#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_A2XX_CP_IB1_BASE 0x00000458
+
+#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_A2XX_CP_IB2_BASE 0x0000045a
+
+#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_A2XX_CP_STAT 0x0000047f
+
+#define REG_A2XX_RBBM_STATUS 0x000005d0
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
+static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
+{
+ return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
+}
+#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
+#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
+#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
+#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
+#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
+#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
+#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
+#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
+#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
+#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
+#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
+#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
+#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
+#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
+#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
+#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
+#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
+#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
+#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+
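/*
 * Illustrative sketch (not part of this header): each field comes as a
 * MASK/SHIFT pair plus an inline packer; extracting a field back out is
 * the inverse shift-and-mask, e.g. for a raw RBBM_STATUS value:
 */
static inline uint32_t example_rbbm_cmdfifo_avail(uint32_t status)
{
	return (status & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK) >>
			A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT;
}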
+#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
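/*
 * Illustrative note: the val >> 5 in the two packers above suggests bin
 * width/height are programmed in units of 32 pixels, so the helpers take
 * pixel counts directly. Worked by hand: a 64x32-pixel bin gives a width
 * field of 64 >> 5 = 2 and a height field of 32 >> 5 = 1, so
 *
 *	A2XX_A220_VSC_BIN_SIZE_WIDTH(64) |
 *		A2XX_A220_VSC_BIN_SIZE_HEIGHT(32) == 0x22
 */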
+static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
+
+#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
+
+#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
+
+#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
+
+#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
+
+#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
+
+#define REG_A2XX_SQ_INT_CNTL 0x00000d34
+
+#define REG_A2XX_SQ_INT_STATUS 0x00000d35
+
+#define REG_A2XX_SQ_INT_ACK 0x00000d36
+
+#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
+
+#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
+
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
+
+#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
+
+#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
+
+#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
+
+#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+
+#define REG_A2XX_RB_COLOR_INFO 0x00002001
+#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
+#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
+static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
+}
+#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
+#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
+#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
+static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
+}
+#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
+#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
+static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
+#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_INFO 0x00002002
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
+
+#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
+#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_UNKNOWN_2010 0x00002010
+
+#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
+
+#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
+
+#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
+
+#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
+
+#define REG_A2XX_RB_COLOR_MASK 0x00002104
+#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
+#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
+#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
+#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
+
+#define REG_A2XX_RB_BLEND_RED 0x00002105
+
+#define REG_A2XX_RB_BLEND_GREEN 0x00002106
+
+#define REG_A2XX_RB_BLEND_BLUE 0x00002107
+
+#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
+
+#define REG_A2XX_RB_FOG_COLOR 0x00002109
+
+#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
+#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_ALPHA_REF 0x0000210e
+
+#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
+#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
+#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
+#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
+#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
+#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
+#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
+#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
+#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
+
+#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
+#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
+#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
+#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
+#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
+
+#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
+
+#define REG_A2XX_SQ_WRAPPING_0 0x00002183
+
+#define REG_A2XX_SQ_WRAPPING_1 0x00002184
+
+#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
+
+#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+
+#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
+#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
+#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
+#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
+#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
+}
+
+#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
+
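+/* Usage sketch (illustrative, not emitted by headergen): each field helper
+ * above shifts its argument into position and masks it, so a complete
+ * register word is simply the OR of the individual fields:
+ *
+ *   enum adreno_rb_blend_factor csrc, cdst;  // chosen by the driver
+ *   enum adreno_rb_blend_opcode cop;
+ *   uint32_t val = A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(csrc) |
+ *                  A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(cop) |
+ *                  A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(cdst);
+ */
+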
+#define REG_A2XX_RB_COLORCONTROL 0x00002202
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
+#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
+#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
+#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
+#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
+#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
+}
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
+#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
+static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
+{
+ return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
+}
+#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
+#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
+#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
+#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
+#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
+
+#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
+#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
+#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
+#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
+#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
+#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
+#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
+#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
+#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
+#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
+
+#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
+#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
+#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
+#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
+#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_RB_MODECONTROL 0x00002208
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
+static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
+{
+ return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
+}
+
+#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
+
+#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
+
+#define REG_A2XX_CLEAR_COLOR 0x0000220b
+#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
+#define A2XX_CLEAR_COLOR_RED__SHIFT 0
+static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
+}
+#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
+#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
+static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
+}
+#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
+#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
+static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
+}
+#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
+#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
+static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
+}
+
+#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
+
+#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+}
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
+#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+}
+
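+/* The float helpers above scale by 8.0 before truncating, which suggests
+ * these 16-bit size fields hold unsigned 13.3 fixed-point values (units of
+ * 1/8 pixel). Round-trip sketch, assuming that encoding:
+ *
+ *   uint32_t bits = A2XX_PA_SU_POINT_SIZE_WIDTH(1.5f); // (uint32_t)(1.5 * 8) == 12, placed in bits 16..31
+ *   float w = ((bits >> 16) & 0xffff) / 8.0f;          // recovers 1.5
+ */
+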
+#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
+}
+
+#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
+
+#define REG_A2XX_VGT_ENHANCE 0x00002294
+
+#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
+}
+#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
+#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
+#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
+
+#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
+}
+
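+/* fui() is not defined in this header; it is presumably the usual
+ * float-to-uint bitcast, so these guard-band registers take a raw 32-bit
+ * IEEE-754 float. A minimal sketch of such a helper, under that assumption:
+ *
+ *   static inline uint32_t fui(float f)
+ *   {
+ *           union { float f; uint32_t u; } u = { .f = f };
+ *           return u.u;
+ *   }
+ */
+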
+#define REG_A2XX_SQ_VS_CONST 0x00002307
+#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_PS_CONST 0x00002308
+#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
+
+#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
+
+#define REG_A2XX_PA_SC_AA_MASK 0x00002312
+
+#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
+
+#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
+
+#define REG_A2XX_RB_COPY_CONTROL 0x00002318
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
+}
+#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
+
+#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
+#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
+#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+}
+
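+/* The (val >> 5) above suggests the helper takes the destination pitch in
+ * bytes while the register stores it in 32-byte units, i.e. the caller is
+ * expected to pass a 32-byte-aligned pitch. Hypothetical example (width and
+ * cpp are stand-in variables):
+ *
+ *   uint32_t reg = A2XX_RB_COPY_DEST_PITCH(ALIGN(width * cpp, 32));
+ */
+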
+#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
+
+#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
+#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
+#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
+}
+#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
+#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
+
+#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
+
+#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
+
+#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
+
+#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
+
+#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
+
+#define REG_A2XX_SQ_CONSTANT_0 0x00004000
+
+#define REG_A2XX_SQ_FETCH_0 0x00004800
+
+#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
+
+#define REG_A2XX_SQ_CF_LOOP 0x00004908
+
+#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
+
+#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
+
+#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+
+#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
+#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
+#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
+#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+}
+
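+/* Unlike the registers above, SQ_TEX_0..SQ_TEX_3 start at offset zero: they
+ * appear to be dword offsets within a texture state block loaded via the CP
+ * rather than absolute register addresses, so they would be used when
+ * packing the state object, e.g. (sketch): state[REG_A2XX_SQ_TEX_0] = ...;
+ */
+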
+#define REG_A2XX_SQ_TEX_1 0x00000001
+
+#define REG_A2XX_SQ_TEX_2 0x00000002
+#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
+#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
+}
+#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
+#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
+#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
+#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
+#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
+#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
+static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
+static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
+}
+
+
+#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 00000000000..d183516067b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
+#ifndef A3XX_XML
+#define A3XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+};
+
+enum a3xx_tile_mode {
+ LINEAR = 0,
+ TILE_32X32 = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
+enum a3xx_state_block_id {
+ HLSQ_BLOCK_ID_TP_TEX = 2,
+ HLSQ_BLOCK_ID_TP_MIPMAP = 3,
+ HLSQ_BLOCK_ID_SP_VS = 4,
+ HLSQ_BLOCK_ID_SP_FS = 6,
+};
+
+enum a3xx_cache_opcode {
+ INVALIDATE = 1,
+};
+
+enum a3xx_vtx_fmt {
+ VFMT_FLOAT_32 = 0,
+ VFMT_FLOAT_32_32 = 1,
+ VFMT_FLOAT_32_32_32 = 2,
+ VFMT_FLOAT_32_32_32_32 = 3,
+ VFMT_FLOAT_16 = 4,
+ VFMT_FLOAT_16_16 = 5,
+ VFMT_FLOAT_16_16_16 = 6,
+ VFMT_FLOAT_16_16_16_16 = 7,
+ VFMT_FIXED_32 = 8,
+ VFMT_FIXED_32_32 = 9,
+ VFMT_FIXED_32_32_32 = 10,
+ VFMT_FIXED_32_32_32_32 = 11,
+ VFMT_SHORT_16 = 16,
+ VFMT_SHORT_16_16 = 17,
+ VFMT_SHORT_16_16_16 = 18,
+ VFMT_SHORT_16_16_16_16 = 19,
+ VFMT_USHORT_16 = 20,
+ VFMT_USHORT_16_16 = 21,
+ VFMT_USHORT_16_16_16 = 22,
+ VFMT_USHORT_16_16_16_16 = 23,
+ VFMT_NORM_SHORT_16 = 24,
+ VFMT_NORM_SHORT_16_16 = 25,
+ VFMT_NORM_SHORT_16_16_16 = 26,
+ VFMT_NORM_SHORT_16_16_16_16 = 27,
+ VFMT_NORM_USHORT_16 = 28,
+ VFMT_NORM_USHORT_16_16 = 29,
+ VFMT_NORM_USHORT_16_16_16 = 30,
+ VFMT_NORM_USHORT_16_16_16_16 = 31,
+ VFMT_UBYTE_8 = 40,
+ VFMT_UBYTE_8_8 = 41,
+ VFMT_UBYTE_8_8_8 = 42,
+ VFMT_UBYTE_8_8_8_8 = 43,
+ VFMT_NORM_UBYTE_8 = 44,
+ VFMT_NORM_UBYTE_8_8 = 45,
+ VFMT_NORM_UBYTE_8_8_8 = 46,
+ VFMT_NORM_UBYTE_8_8_8_8 = 47,
+ VFMT_BYTE_8 = 48,
+ VFMT_BYTE_8_8 = 49,
+ VFMT_BYTE_8_8_8 = 50,
+ VFMT_BYTE_8_8_8_8 = 51,
+ VFMT_NORM_BYTE_8 = 52,
+ VFMT_NORM_BYTE_8_8 = 53,
+ VFMT_NORM_BYTE_8_8_8 = 54,
+ VFMT_NORM_BYTE_8_8_8_8 = 55,
+ VFMT_UINT_10_10_10_2 = 60,
+ VFMT_NORM_UINT_10_10_10_2 = 61,
+ VFMT_INT_10_10_10_2 = 62,
+ VFMT_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a3xx_tex_fmt {
+ TFMT_NORM_USHORT_565 = 4,
+ TFMT_NORM_USHORT_5551 = 6,
+ TFMT_NORM_USHORT_4444 = 7,
+ TFMT_NORM_UINT_X8Z24 = 10,
+ TFMT_NORM_UINT_NV12_UV_TILED = 17,
+ TFMT_NORM_UINT_NV12_Y_TILED = 19,
+ TFMT_NORM_UINT_NV12_UV = 21,
+ TFMT_NORM_UINT_NV12_Y = 23,
+ TFMT_NORM_UINT_I420_Y = 24,
+ TFMT_NORM_UINT_I420_U = 26,
+ TFMT_NORM_UINT_I420_V = 27,
+ TFMT_NORM_UINT_2_10_10_10 = 41,
+ TFMT_NORM_UINT_A8 = 44,
+ TFMT_NORM_UINT_L8_A8 = 47,
+ TFMT_NORM_UINT_8 = 48,
+ TFMT_NORM_UINT_8_8 = 49,
+ TFMT_NORM_UINT_8_8_8 = 50,
+ TFMT_NORM_UINT_8_8_8_8 = 51,
+ TFMT_FLOAT_16 = 64,
+ TFMT_FLOAT_16_16 = 65,
+ TFMT_FLOAT_16_16_16_16 = 67,
+ TFMT_FLOAT_32 = 84,
+ TFMT_FLOAT_32_32 = 85,
+ TFMT_FLOAT_32_32_32_32 = 87,
+};
+
+enum a3xx_tex_fetchsize {
+ TFETCH_DISABLE = 0,
+ TFETCH_1_BYTE = 1,
+ TFETCH_2_BYTE = 2,
+ TFETCH_4_BYTE = 3,
+ TFETCH_8_BYTE = 4,
+ TFETCH_16_BYTE = 5,
+};
+
+enum a3xx_color_fmt {
+ RB_R8G8B8_UNORM = 4,
+ RB_R8G8B8A8_UNORM = 8,
+ RB_Z16_UNORM = 12,
+ RB_A8_UNORM = 20,
+};
+
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_sp_perfcounter_select {
+ SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+ SP0_ICL1_MISSES = 26,
+ SP_ALU_ACTIVE_CYCLES = 29,
+};
+
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_tex_filter {
+ A3XX_TEX_NEAREST = 0,
+ A3XX_TEX_LINEAR = 1,
+};
+
+enum a3xx_tex_clamp {
+ A3XX_TEX_REPEAT = 0,
+ A3XX_TEX_CLAMP_TO_EDGE = 1,
+ A3XX_TEX_MIRROR_REPEAT = 2,
+ A3XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a3xx_tex_swiz {
+ A3XX_TEX_X = 0,
+ A3XX_TEX_Y = 1,
+ A3XX_TEX_Z = 2,
+ A3XX_TEX_W = 3,
+ A3XX_TEX_ZERO = 4,
+ A3XX_TEX_ONE = 5,
+};
+
+enum a3xx_tex_type {
+ A3XX_TEX_1D = 0,
+ A3XX_TEX_2D = 1,
+ A3XX_TEX_CUBE = 2,
+ A3XX_TEX_3D = 3,
+};
+
+#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A3XX_INT0_VFD_ERROR 0x00000040
+#define A3XX_INT0_CP_SW_INT 0x00000080
+#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A3XX_INT0_CP_HW_FAULT 0x00000800
+#define A3XX_INT0_CP_DMA 0x00001000
+#define A3XX_INT0_CP_IB2_INT 0x00002000
+#define A3XX_INT0_CP_IB1_INT 0x00004000
+#define A3XX_INT0_CP_RB_INT 0x00008000
+#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A3XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
+
+#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
+
+#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
+
+#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
+
+#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
+
+#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
+
+#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
+
+#define REG_A3XX_RBBM_AHB_CMD 0x00000022
+
+#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
+
+#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
+
+#define REG_A3XX_RBBM_STATUS 0x00000030
+#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+
+#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
+
+#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
+
+#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
+
+#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
+
+#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
+
+#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
+
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
+
+#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
+
+#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
+
+#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
+
+#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
+
+#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
+
+#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
+
+#define REG_A3XX_CP_ROQ_DATA 0x000001cd
+
+#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
+
+#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
+
+#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
+
+#define REG_A3XX_CP_MEQ_ADDR 0x000001da
+
+#define REG_A3XX_CP_MEQ_DATA 0x000001db
+
+#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A3XX_CP_HW_FAULT 0x0000045c
+
+#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
+
+#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
+
+static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
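+/* The per-instance helpers above compute the address of the i'th element of
+ * a register array (stride of one dword here). Usage sketch, with
+ * gpu_write() standing in for whatever register-write primitive is used:
+ *
+ *   for (i = 0; i < nr_protect; i++)
+ *           gpu_write(gpu, REG_A3XX_CP_PROTECT_REG(i), protect[i]);
+ */
+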
+#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+
+#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
+#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+
+#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
+#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
+#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
+#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
+#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
+#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+
+#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
+{
+ return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+
+#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
+static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
+#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
+static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
+#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
+
+#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
+static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
+#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
+static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
+#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A3XX_UNKNOWN_20C3 0x000020c3
+
+static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
+
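+/* The RB_MRT helpers take a render-target index and advance by a stride of
+ * four dwords per target, so per-MRT state is addressed as, e.g.:
+ *
+ *   REG_A3XX_RB_MRT_CONTROL(i)        == 0x20c4 + 4*i
+ *   REG_A3XX_RB_MRT_BLEND_CONTROL(i)  == 0x20c7 + 4*i
+ */
+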
+#define REG_A3XX_RB_BLEND_RED 0x000020e4
+#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
+#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
+#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
+#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
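+/*
+ * Each RB_BLEND_{RED,GREEN,BLUE,ALPHA} register carries the constant
+ * blend color channel twice: as an 8-bit UINT in the low bits and as a
+ * 16-bit half-float (via util_float_to_half()) in the high bits. E.g.
+ * a constant red of 0.5 (hypothetical value):
+ *
+ *	gpu_write(gpu, REG_A3XX_RB_BLEND_RED,
+ *		A3XX_RB_BLEND_RED_UINT(0x80) | A3XX_RB_BLEND_RED_FLOAT(0.5f));
+ */
+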
+#define REG_A3XX_UNKNOWN_20E8 0x000020e8
+
+#define REG_A3XX_UNKNOWN_20E9 0x000020e9
+
+#define REG_A3XX_UNKNOWN_20EA 0x000020ea
+
+#define REG_A3XX_UNKNOWN_20EB 0x000020eb
+
+#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
+#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
+#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
+#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+
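+/*
+ * Note the (val >> 5) / (val >> 10) pre-shifts in the helpers above:
+ * callers pass plain byte addresses and pitches, and the encoding drops
+ * the low bits the hardware assumes to be zero (i.e. the surface must
+ * already be suitably aligned). A resolve destination might be set up
+ * roughly like this (iova and pitch are hypothetical, emitted via the
+ * cmdstream in practice):
+ *
+ *	OUT_PKT0(ring, REG_A3XX_RB_COPY_DEST_BASE, 1);
+ *	OUT_RING(ring, A3XX_RB_COPY_DEST_BASE_BASE(iova));
+ *	OUT_PKT0(ring, REG_A3XX_RB_COPY_DEST_PITCH, 1);
+ *	OUT_RING(ring, A3XX_RB_COPY_DEST_PITCH_PITCH(pitch));
+ */
+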
+#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
+#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
+
+#define REG_A3XX_UNKNOWN_2101 0x00002101
+
+#define REG_A3XX_RB_DEPTH_INFO 0x00002102
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
+#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A3XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
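+/*
+ * Usage sketch (hypothetical state): an "always pass, keep" setup for
+ * both faces packs as below, assuming the FUNC_* and STENCIL_* names
+ * from the shared adreno enums:
+ *
+ *	uint32_t sc = A3XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *		A3XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
+ *		A3XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
+ *		A3XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
+ *		A3XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
+ *		A3XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
+ *		A3XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
+ *		A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP);
+ */
+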
+#define REG_A3XX_UNKNOWN_2105 0x00002105
+
+#define REG_A3XX_UNKNOWN_2106 0x00002106
+
+#define REG_A3XX_UNKNOWN_2107 0x00002107
+
+#define REG_A3XX_RB_STENCILREFMASK 0x00002108
+#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
+#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+
+#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
+
+#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+
+#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
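+/*
+ * REG_A3XX_VFD_FETCH(i0) and friends implement the array-register
+ * pattern used throughout this file: element i0 of a register group
+ * lives at base + stride*i0 (stride 2 here, so the INSTR_0/INSTR_1
+ * pairs interleave). A sketch of programming fetch slot 0, with
+ * hypothetical size/stride values and buffer address; the second dword
+ * (VFD_FETCH_INSTR_1) holds the vertex buffer address:
+ *
+ *	OUT_PKT0(ring, REG_A3XX_VFD_FETCH_INSTR_0(0), 2);
+ *	OUT_RING(ring, A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(15) |
+ *			A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(16));
+ *	OUT_RING(ring, buf_iova);
+ */
+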
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
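+/*
+ * The (val >> 5) pre-shift means WIDTH/HEIGHT take pixel sizes and are
+ * stored in 32-pixel units, so e.g. 64x64 bins (hypothetical size):
+ *
+ *	gpu_write(gpu, REG_A3XX_VSC_BIN_SIZE,
+ *		A3XX_VSC_BIN_SIZE_WIDTH(64) | A3XX_VSC_BIN_SIZE_HEIGHT(64));
+ */
+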
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
+
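+/*
+ * Sketch of configuring visibility-stream pipe n (x/y/w/h are
+ * hypothetical bin-grid coordinates/extents, pipe_iova a hypothetical
+ * address for the per-pipe data buffer below):
+ *
+ *	gpu_write(gpu, REG_A3XX_VSC_PIPE_CONFIG(n),
+ *		A3XX_VSC_PIPE_CONFIG_X(x) | A3XX_VSC_PIPE_CONFIG_Y(y) |
+ *		A3XX_VSC_PIPE_CONFIG_W(w) | A3XX_VSC_PIPE_CONFIG_H(h));
+ *	gpu_write(gpu, REG_A3XX_VSC_PIPE_DATA_ADDRESS(n), pipe_iova);
+ */
+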
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_UNKNOWN_0C81 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
+#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
+#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+}
+#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+
+#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
+
+#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
+
+#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
+
+#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
+
+#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
+
+#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
+
+#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
+
+#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
+
+#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
+
+#define REG_A3XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
+
+#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+
+#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
+#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
+#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
+#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
+#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
+#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
+
+#define REG_A3XX_TEX_SAMP_1 0x00000001
+
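+/*
+ * Note: unlike the REG_A3XX_* addresses above, TEX_SAMP_0/1 and the
+ * TEX_CONST_0..3 dwords below are offsets within a texture state object
+ * (uploaded indirectly, typically via CP_LOAD_STATE), not directly
+ * addressable registers, hence the zero-based offsets.
+ */
+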
+#define REG_A3XX_TEX_CONST_0 0x00000000
+#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A3XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
+{
+ return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
+}
+#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
+#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_1 0x00000001
+#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
+#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
+#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
+static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+{
+ return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_2 0x00000002
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
+#define A3XX_TEX_CONST_2_INDX__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
+}
+#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
+#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
+static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_3 0x00000003
+
+#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 00000000000..035bd13dc8b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "a3xx_gpu.h"
+
+#define A3XX_INT0_MASK \
+ (A3XX_INT0_RBBM_AHB_ERROR | \
+ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A3XX_INT0_CP_T0_PACKET_IN_IB | \
+ A3XX_INT0_CP_OPCODE_ERROR | \
+ A3XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A3XX_INT0_CP_HW_FAULT | \
+ A3XX_INT0_CP_IB1_INT | \
+ A3XX_INT0_CP_IB2_INT | \
+ A3XX_INT0_CP_RB_INT | \
+ A3XX_INT0_CP_REG_PROTECT_FAULT | \
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+static struct platform_device *a3xx_pdev;
+
+static void a3xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+ gpu->funcs->idle(gpu);
+}
+
+static int a3xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ DBG("%s", gpu->name);
+
+ if (adreno_is_a305(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+
+ } else if (adreno_is_a320(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* Set up 24 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Disable VBIF clock gating. This allows the AXI bus to run at
+ * a higher frequency than the GPU:
+ */
+ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
+
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter: */
+ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection: */
+ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting: */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Turn on the power counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
+
+ /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
+ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating: */
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+
+ /* Set the OCMEM base address for A330 */
+//TODO:
+// if (adreno_is_a330(adreno_gpu)) {
+// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+// }
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS;
+ * we will use this to augment our hang detection:
+ */
+ gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
+ SP_FS_FULL_ALU_INSTRUCTIONS);
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
+
+ /* VBIF registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->pm4->data);
+ len = adreno_gpu->pm4->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->pfp->data);
+ len = adreno_gpu->pfp->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ a3xx_me_init(gpu);
+
+ return 0;
+}
+
+static void a3xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ put_device(&a3xx_gpu->pdev->dev);
+ kfree(a3xx_gpu);
+}
+
+static void a3xx_idle(struct msm_gpu *gpu)
+{
+ unsigned long t;
+
+ /* wait for ringbuffer to drain: */
+ adreno_idle(gpu);
+
+ t = jiffies + ADRENO_IDLE_TIMEOUT;
+
+ /* then wait for GPU to finish: */
+ do {
+ uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+ if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
+ return;
+ } while (time_before(jiffies, t));
+
+ DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
+ DBG("%s: %08x", gpu->name, status);
+
+ // TODO
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
+ 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
+ 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+ 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+ 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+ 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+ 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+ 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+ 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
+ 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
+ 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
+ 0x303c, 0x303c, 0x305e, 0x305f,
+};
+
+static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ int i;
+
+ adreno_show(gpu, m);
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+ /* dump these out in a form that can be parsed by demsm: */
+ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+ uint32_t start = a3xx_registers[i];
+ uint32_t end = a3xx_registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+#endif
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = adreno_recover,
+ .last_fence = adreno_last_fence,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .idle = a3xx_idle,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a3xx_show,
+#endif
+ },
+};
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+{
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct msm_gpu *gpu;
+ struct platform_device *pdev = a3xx_pdev;
+ struct adreno_platform_config *config;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a3xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
+ if (!a3xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ gpu = &a3xx_gpu->base.base;
+
+ get_device(&pdev->dev);
+ a3xx_gpu->pdev = pdev;
+
+ gpu->fast_rate = config->fast_rate;
+ gpu->slow_rate = config->slow_rate;
+ gpu->bus_freq = config->bus_freq;
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+
+ ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
+ &funcs, config->rev);
+ if (ret)
+ goto fail;
+
+ return &a3xx_gpu->base.base;
+
+fail:
+ if (a3xx_gpu)
+ a3xx_destroy(&a3xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * The a3xx device:
+ */
+
+static int a3xx_probe(struct platform_device *pdev)
+{
+ static struct adreno_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ uint32_t version = socinfo_get_version();
+ if (cpu_is_apq8064ab()) {
+ config.fast_rate = 450000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MAJOR(version) == 2)
+ config.rev = ADRENO_REV(3, 2, 0, 2);
+ else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 1))
+ config.rev = ADRENO_REV(3, 2, 0, 1);
+ else
+ config.rev = ADRENO_REV(3, 2, 0, 0);
+
+ } else if (cpu_is_msm8930()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 3;
+
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 2))
+ config.rev = ADRENO_REV(3, 0, 5, 2);
+ else
+ config.rev = ADRENO_REV(3, 0, 5, 0);
+
+ }
+#endif
+ pdev->dev.platform_data = &config;
+ a3xx_pdev = pdev;
+ return 0;
+}
+
+static int a3xx_remove(struct platform_device *pdev)
+{
+ a3xx_pdev = NULL;
+ return 0;
+}
+
+static struct platform_driver a3xx_driver = {
+ .probe = a3xx_probe,
+ .remove = a3xx_remove,
+ .driver.name = "kgsl-3d0",
+};
+
+void __init a3xx_register(void)
+{
+ platform_driver_register(&a3xx_driver);
+}
+
+void __exit a3xx_unregister(void)
+{
+ platform_driver_unregister(&a3xx_driver);
+}
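
The repetitive-looking VBIF *_LIM_CONF values in a3xx_hw_init() above
pack one request-queue depth per byte lane: 0x10101010 is four 16-deep
queues, 0x18181818 four 24-deep ones. A hypothetical helper (not part
of the driver) that makes the encoding explicit:

    static inline uint32_t vbif_lim_conf(uint8_t depth)
    {
        /* replicate the per-queue depth into all four byte lanes */
        return depth * 0x01010101u;
    }

    /* vbif_lim_conf(16) == 0x10101010 (a305/a320),
     * vbif_lim_conf(24) == 0x18181818 (a330) */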
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 00000000000..32c398c2d00
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __A3XX_GPU_H__
+#define __A3XX_GPU_H__
+
+#include "adreno_gpu.h"
+#include "a3xx.xml.h"
+
+struct a3xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+};
+#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+
+#endif /* __A3XX_GPU_H__ */
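
struct msm_gpu is embedded at adreno_gpu.base, and struct adreno_gpu
at a3xx_gpu.base, so recovering the outer structure from a struct
msm_gpu pointer takes two container_of() hops, exactly as
a3xx_destroy() does in a3xx_gpu.c. A hypothetical helper spelling out
the chain:

    static inline struct a3xx_gpu *msm_gpu_to_a3xx(struct msm_gpu *gpu)
    {
        /* msm_gpu -> adreno_gpu -> a3xx_gpu, innermost level first */
        return to_a3xx_gpu(to_adreno_gpu(gpu));
    }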
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 00000000000..61979d458ac
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
+#ifndef ADRENO_COMMON_XML
+#define ADRENO_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum adreno_pa_su_sc_draw {
+ PC_DRAW_POINTS = 0,
+ PC_DRAW_LINES = 1,
+ PC_DRAW_TRIANGLES = 2,
+};
+
+enum adreno_compare_func {
+ FUNC_NEVER = 0,
+ FUNC_LESS = 1,
+ FUNC_EQUAL = 2,
+ FUNC_LEQUAL = 3,
+ FUNC_GREATER = 4,
+ FUNC_NOTEQUAL = 5,
+ FUNC_GEQUAL = 6,
+ FUNC_ALWAYS = 7,
+};
+
+enum adreno_stencil_op {
+ STENCIL_KEEP = 0,
+ STENCIL_ZERO = 1,
+ STENCIL_REPLACE = 2,
+ STENCIL_INCR_CLAMP = 3,
+ STENCIL_DECR_CLAMP = 4,
+ STENCIL_INVERT = 5,
+ STENCIL_INCR_WRAP = 6,
+ STENCIL_DECR_WRAP = 7,
+};
+
+enum adreno_rb_blend_factor {
+ FACTOR_ZERO = 0,
+ FACTOR_ONE = 1,
+ FACTOR_SRC_COLOR = 4,
+ FACTOR_ONE_MINUS_SRC_COLOR = 5,
+ FACTOR_SRC_ALPHA = 6,
+ FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ FACTOR_DST_COLOR = 8,
+ FACTOR_ONE_MINUS_DST_COLOR = 9,
+ FACTOR_DST_ALPHA = 10,
+ FACTOR_ONE_MINUS_DST_ALPHA = 11,
+ FACTOR_CONSTANT_COLOR = 12,
+ FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
+ FACTOR_CONSTANT_ALPHA = 14,
+ FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
+ FACTOR_SRC_ALPHA_SATURATE = 16,
+};
+
+enum adreno_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
+enum adreno_rb_surface_endian {
+ ENDIAN_NONE = 0,
+ ENDIAN_8IN16 = 1,
+ ENDIAN_8IN32 = 2,
+ ENDIAN_16IN32 = 3,
+ ENDIAN_8IN64 = 4,
+ ENDIAN_8IN128 = 5,
+};
+
+enum adreno_rb_dither_mode {
+ DITHER_DISABLE = 0,
+ DITHER_ALWAYS = 1,
+ DITHER_IF_ALPHA_OFF = 2,
+};
+
+enum adreno_rb_depth_format {
+ DEPTHX_16 = 0,
+ DEPTHX_24_8 = 1,
+};
+
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
+#define REG_AXXX_MH_MMU_CONFIG 0x00000040
+#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
+
+#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
+
+#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_AXXX_MH_MMU_MPU_END 0x00000047
+
+#define REG_AXXX_CP_RB_BASE 0x000001c0
+
+#define REG_AXXX_CP_RB_CNTL 0x000001c1
+#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
+#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
+static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
+#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
+static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
+#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
+static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
+}
+#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
+#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
+#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
+
+#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
+}
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+}
+
+#define REG_AXXX_CP_RB_RPTR 0x000001c4
+
+#define REG_AXXX_CP_RB_WPTR 0x000001c5
+
+#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
+
+#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
+
+#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
+
+#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+
+#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
+#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
+#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
+#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
+#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
+}
+
+#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
+#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
+#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
+static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
+{
+ return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
+#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
+#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
+static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
+}
+
+#define REG_AXXX_SCRATCH_UMSK 0x000001dc
+#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
+#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
+static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
+}
+#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
+#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
+static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
+}
+
+#define REG_AXXX_SCRATCH_ADDR 0x000001dd
+
+#define REG_AXXX_CP_ME_RDADDR 0x000001ea
+
+#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
+
+#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
+
+#define REG_AXXX_CP_INT_CNTL 0x000001f2
+
+#define REG_AXXX_CP_INT_STATUS 0x000001f3
+
+#define REG_AXXX_CP_INT_ACK 0x000001f4
+
+#define REG_AXXX_CP_ME_CNTL 0x000001f6
+
+#define REG_AXXX_CP_ME_STATUS 0x000001f7
+
+#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
+
+#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
+
+#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
+
+#define REG_AXXX_CP_DEBUG 0x000001fc
+#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
+#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
+#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
+#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
+#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
+#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
+#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
+#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
+
+#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
+#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
+
+#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
+
+#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
+
+#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
+
+#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
+
+#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
+
+#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
+
+#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+
+#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
+
+#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
+
+#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
+
+#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
+
+#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+
+
+#endif /* ADRENO_COMMON_XML */
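
Worked example for the CP_RB_CNTL helpers above, mirroring what
adreno_hw_init() below programs for the 32KiB ringbuffer: 32768 bytes
is 32768/8 = 4096 quad-words and ilog2(4096) = 12, so:

    uint32_t rb_cntl = AXXX_CP_RB_CNTL_BUFSZ(12) |  /* == 0x0000000c */
                       AXXX_CP_RB_CNTL_BLKSZ(16);   /* == 0x00001000 */

    /* rb_cntl == 0x0000100c */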
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 00000000000..a60584763b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "adreno_gpu.h"
+#include "msm_gem.h"
+
+struct adreno_info {
+ struct adreno_rev rev;
+ uint32_t revn;
+ const char *name;
+ const char *pm4fw, *pfpfw;
+ uint32_t gmem;
+};
+
+#define ANY_ID 0xff
+
+static const struct adreno_info gpulist[] = {
+ {
+ .rev = ADRENO_REV(3, 0, 5, ANY_ID),
+ .revn = 305,
+ .name = "A305",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_256K,
+ }, {
+ .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
+ .revn = 320,
+ .name = "A320",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_512K,
+ }, {
+ .rev = ADRENO_REV(3, 3, 0, 0),
+ .revn = 330,
+ .name = "A330",
+ .pm4fw = "a330_pm4.fw",
+ .pfpfw = "a330_pfp.fw",
+ .gmem = SZ_1M,
+ },
+};
+
+#define RB_SIZE SZ_32K
+#define RB_BLKSIZE 16
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ switch (param) {
+ case MSM_PARAM_GPU_ID:
+ *value = adreno_gpu->info->revn;
+ return 0;
+ case MSM_PARAM_GMEM_SIZE:
+ *value = adreno_gpu->info->gmem;
+ return 0;
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+int adreno_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ DBG("%s", gpu->name);
+
+ /* Setup REG_CP_RB_CNTL: */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ /* size is log2(quad-words): */
+ AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
+ AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+
+ /* Setup ringbuffer address: */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
+ gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+
+ /* Setup scratch/timestamp: */
+ gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+
+ gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+
+ return 0;
+}
+
+static uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return ring->cur - ring->start;
+}
+
+uint32_t adreno_last_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ return adreno_gpu->memptrs->fence;
+}
+
+void adreno_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ gpu->funcs->pm_suspend(gpu);
+
+ /* reset ringbuffer: */
+ gpu->rb->cur = gpu->rb->start;
+
+ /* reset completed fence seqno, just discard anything pending: */
+ adreno_gpu->memptrs->fence = gpu->submitted_fence;
+
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ /* hmm, oh well? */
+ }
+}
+
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (priv->lastctx == ctx)
+ break;
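+ /* otherwise fall through and emit the IB: */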
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /* on a320, at least, we seem to need to pad things out to an
+ * even number of qwords to avoid an issue w/ CP hanging on
+ * wrap-around:
+ */
+ if (ibs % 2)
+ OUT_PKT2(ring);
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->fence);
+
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, submit->fence);
+
+ /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+#if 0
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+ }
+#endif
+
+ gpu->funcs->flush(gpu);
+
+ return 0;
+}
+
+void adreno_flush(struct msm_gpu *gpu)
+{
+ uint32_t wptr = get_wptr(gpu->rb);
+
+ /* ensure writes to ringbuffer have hit system memory: */
+ mb();
+
+ gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+}
+
+void adreno_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t rptr, wptr = get_wptr(gpu->rb);
+ unsigned long t;
+
+ t = jiffies + ADRENO_IDLE_TIMEOUT;
+
+ /* wait for CP to drain ringbuffer: */
+ do {
+ rptr = adreno_gpu->memptrs->rptr;
+ if (rptr == wptr)
+ return;
+ } while (time_before(jiffies, t));
+
+ DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ gpu->submitted_fence);
+ seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
+ seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
+ seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+}
+#endif
+
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t freedwords;
+ do {
+ uint32_t size = gpu->rb->size / 4;
+ uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t rptr = adreno_gpu->memptrs->rptr;
+ freedwords = (rptr + (size - 1) - wptr) % size;
+ } while (freedwords < ndwords);
+}
+
+static const char *iommu_ports[] = {
+ "gfx3d_user", "gfx3d_priv",
+ "gfx3d1_user", "gfx3d1_priv",
+};
+
+static inline bool _rev_match(uint8_t entry, uint8_t id)
+{
+ return (entry == ANY_ID) || (entry == id);
+}
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev)
+{
+ int i, ret;
+
+ /* identify gpu: */
+ for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ const struct adreno_info *info = &gpulist[i];
+ if (_rev_match(info->rev.core, rev.core) &&
+ _rev_match(info->rev.major, rev.major) &&
+ _rev_match(info->rev.minor, rev.minor) &&
+ _rev_match(info->rev.patchid, rev.patchid)) {
+ gpu->info = info;
+ gpu->revn = info->revn;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(gpulist)) {
+ dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ rev.core, rev.major, rev.minor, rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
+ rev.core, rev.major, rev.minor, rev.patchid);
+
+ gpu->funcs = funcs;
+ gpu->rev = rev;
+
+ ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
+ gpu->info->pm4fw, ret);
+ return ret;
+ }
+
+ ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
+ gpu->info->pfpfw, ret);
+ return ret;
+ }
+
+ ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
+ gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+ RB_SIZE);
+ if (ret)
+ return ret;
+
+ ret = msm_iommu_attach(drm, gpu->base.iommu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ if (ret)
+ return ret;
+
+ gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
+ MSM_BO_UNCACHED);
+ if (IS_ERR(gpu->memptrs_bo)) {
+ ret = PTR_ERR(gpu->memptrs_bo);
+ gpu->memptrs_bo = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ return ret;
+ }
+
+ gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ if (!gpu->memptrs) {
+ dev_err(drm->dev, "could not vmap memptrs\n");
+ return -ENOMEM;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ &gpu->memptrs_iova);
+ if (ret) {
+ dev_err(drm->dev, "could not map memptrs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+{
+ if (gpu->memptrs_bo) {
+ if (gpu->memptrs_iova)
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ drm_gem_object_unreference(gpu->memptrs_bo);
+ }
+ if (gpu->pm4)
+ release_firmware(gpu->pm4);
+ if (gpu->pfp)
+ release_firmware(gpu->pfp);
+ msm_gpu_cleanup(&gpu->base);
+}
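
Worked example for the free-space computation in adreno_wait_ring()
above: with the 32KiB ring, size = 8192 dwords. If wptr has advanced
to 8000 and the CP's rptr has wrapped around to 100, then

    freedwords = (100 + 8191 - 8000) % 8192 = 291

which is (rptr - wptr - 1) mod size: one dword is always left unused
so that an empty ring (rptr == wptr, 8191 free) stays distinguishable
from a full one.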
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 00000000000..f73abfba7c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+struct adreno_rev {
+ uint8_t core;
+ uint8_t major;
+ uint8_t minor;
+ uint8_t patchid;
+};
+
+#define ADRENO_REV(core, major, minor, patchid) \
+ ((struct adreno_rev){ core, major, minor, patchid })
+
+struct adreno_gpu_funcs {
+ struct msm_gpu_funcs base;
+};
+
+struct adreno_info;
+
+struct adreno_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t wptr;
+ volatile uint32_t fence;
+};
+
+struct adreno_gpu {
+ struct msm_gpu base;
+ struct adreno_rev rev;
+ const struct adreno_info *info;
+ uint32_t revn; /* numeric revision name */
+ const struct adreno_gpu_funcs *funcs;
+
+ /* firmware: */
+ const struct firmware *pm4, *pfp;
+
+ /* ringbuffer rptr/wptr: */
+ // TODO should this be in msm_ringbuffer? I think it would be
+ // different for z180..
+ struct adreno_rbmemptrs *memptrs;
+ struct drm_gem_object *memptrs_bo;
+ uint32_t memptrs_iova;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct adreno_platform_config {
+ struct adreno_rev rev;
+ uint32_t fast_rate, slow_rate, bus_freq;
+};
+
+#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+
+static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn >= 300) && (gpu->revn < 400);
+}
+
+static inline bool adreno_is_a305(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 305;
+}
+
+static inline bool adreno_is_a320(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 320;
+}
+
+static inline bool adreno_is_a330(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 330;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+int adreno_hw_init(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+void adreno_flush(struct msm_gpu *gpu);
+void adreno_idle(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev);
+void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+
+
+/* ringbuffer helpers (the parts that are adreno specific) */
+
+static inline void
+OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
+}
+
+/* no-op packet: */
+static inline void
+OUT_PKT2(struct msm_ringbuffer *ring)
+{
+ adreno_wait_ring(ring->gpu, 1);
+ OUT_RING(ring, CP_TYPE2_PKT);
+}
+
+static inline void
+OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
+}
+
+
+#endif /* __ADRENO_GPU_H__ */
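
Worked example of the type-3 packet header OUT_PKT3() builds: for
OUT_PKT3(ring, CP_EVENT_WRITE, 3), as used in adreno_submit(), cnt+1 =
4 dwords are reserved and the header dword is

    CP_TYPE3_PKT | ((3 - 1) << 16) | ((70 & 0xFF) << 8)
      = 0xc0000000 | 0x00020000 | 0x00004600
      = 0xc0024600

(CP_EVENT_WRITE is 70 in adreno_pm4.xml.h below), followed by the
three OUT_RING() payload dwords.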
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 00000000000..94c13f418e7
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum vgt_event_type {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ HLSQ_FLUSH = 7,
+ VIZQUERY_START = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+};
+
+enum pc_di_primtype {
+ DI_PT_NONE = 0,
+ DI_PT_POINTLIST = 1,
+ DI_PT_LINELIST = 2,
+ DI_PT_LINESTRIP = 3,
+ DI_PT_TRILIST = 4,
+ DI_PT_TRIFAN = 5,
+ DI_PT_TRISTRIP = 6,
+ DI_PT_RECTLIST = 8,
+ DI_PT_QUADLIST = 13,
+ DI_PT_QUADSTRIP = 14,
+ DI_PT_POLYGON = 15,
+ DI_PT_2D_COPY_RECT_LIST_V0 = 16,
+ DI_PT_2D_COPY_RECT_LIST_V1 = 17,
+ DI_PT_2D_COPY_RECT_LIST_V2 = 18,
+ DI_PT_2D_COPY_RECT_LIST_V3 = 19,
+ DI_PT_2D_FILL_RECT_LIST = 20,
+ DI_PT_2D_LINE_STRIP = 21,
+ DI_PT_2D_TRI_STRIP = 22,
+};
+
+enum pc_di_src_sel {
+ DI_SRC_SEL_DMA = 0,
+ DI_SRC_SEL_IMMEDIATE = 1,
+ DI_SRC_SEL_AUTO_INDEX = 2,
+ DI_SRC_SEL_RESERVED = 3,
+};
+
+enum pc_di_index_size {
+ INDEX_SIZE_IGN = 0,
+ INDEX_SIZE_16_BIT = 0,
+ INDEX_SIZE_32_BIT = 1,
+ INDEX_SIZE_8_BIT = 2,
+ INDEX_SIZE_INVALID = 0,
+};
+
+enum pc_di_vis_cull_mode {
+ IGNORE_VISIBILITY = 0,
+};
+
+enum adreno_pm4_packet_type {
+ CP_TYPE0_PKT = 0,
+ CP_TYPE1_PKT = 0x40000000,
+ CP_TYPE2_PKT = 0x80000000,
+ CP_TYPE3_PKT = 0xc0000000,
+};
+
+enum adreno_pm4_type3_packets {
+ CP_ME_INIT = 72,
+ CP_NOP = 16,
+ CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_PFD = 55,
+ CP_WAIT_FOR_IDLE = 38,
+ CP_WAIT_REG_MEM = 60,
+ CP_WAIT_REG_EQ = 82,
+ CP_WAT_REG_GTE = 83,
+ CP_WAIT_UNTIL_READ = 92,
+ CP_WAIT_IB_PFD_COMPLETE = 93,
+ CP_REG_RMW = 33,
+ CP_SET_BIN_DATA = 47,
+ CP_REG_TO_MEM = 62,
+ CP_MEM_WRITE = 61,
+ CP_MEM_WRITE_CNTR = 79,
+ CP_COND_EXEC = 68,
+ CP_COND_WRITE = 69,
+ CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE_SHD = 88,
+ CP_EVENT_WRITE_CFL = 89,
+ CP_EVENT_WRITE_ZPD = 91,
+ CP_RUN_OPENCL = 49,
+ CP_DRAW_INDX = 34,
+ CP_DRAW_INDX_2 = 54,
+ CP_DRAW_INDX_BIN = 52,
+ CP_DRAW_INDX_2_BIN = 53,
+ CP_VIZ_QUERY = 35,
+ CP_SET_STATE = 37,
+ CP_SET_CONSTANT = 45,
+ CP_IM_LOAD = 39,
+ CP_IM_LOAD_IMMEDIATE = 43,
+ CP_LOAD_CONSTANT_CONTEXT = 46,
+ CP_INVALIDATE_STATE = 59,
+ CP_SET_SHADER_BASES = 74,
+ CP_SET_BIN_MASK = 80,
+ CP_SET_BIN_SELECT = 81,
+ CP_CONTEXT_UPDATE = 94,
+ CP_INTERRUPT = 64,
+ CP_IM_STORE = 44,
+ CP_SET_BIN_BASE_OFFSET = 75,
+ CP_SET_DRAW_INIT_FLAGS = 75,
+ CP_SET_PROTECTED_MODE = 95,
+ CP_LOAD_STATE = 48,
+ CP_COND_INDIRECT_BUFFER_PFE = 58,
+ CP_COND_INDIRECT_BUFFER_PFD = 50,
+ CP_INDIRECT_BUFFER_PFE = 63,
+ CP_SET_BIN = 76,
+};
+
+enum adreno_state_block {
+ SB_VERT_TEX = 0,
+ SB_VERT_MIPADDR = 1,
+ SB_FRAG_TEX = 2,
+ SB_FRAG_MIPADDR = 3,
+ SB_VERT_SHADER = 4,
+ SB_FRAG_SHADER = 6,
+};
+
+enum adreno_state_type {
+ ST_SHADER = 0,
+ ST_CONSTANTS = 1,
+};
+
+enum adreno_state_src {
+ SS_DIRECT = 0,
+ SS_INDIRECT = 4,
+};
+
+#define REG_CP_LOAD_STATE_0 0x00000000
+#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
+#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
+#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
+static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE_1 0x00000001
+#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
+{
+ return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_0 0x00000000
+
+#define REG_CP_SET_BIN_1 0x00000001
+#define CP_SET_BIN_1_X1__MASK 0x0000ffff
+#define CP_SET_BIN_1_X1__SHIFT 0
+static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
+}
+#define CP_SET_BIN_1_Y1__MASK 0xffff0000
+#define CP_SET_BIN_1_Y1__SHIFT 16
+static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
+}
+
+#define REG_CP_SET_BIN_2 0x00000002
+#define CP_SET_BIN_2_X2__MASK 0x0000ffff
+#define CP_SET_BIN_2_X2__SHIFT 0
+static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
+}
+#define CP_SET_BIN_2_Y2__MASK 0xffff0000
+#define CP_SET_BIN_2_Y2__SHIFT 16
+static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
+}
+
+
+#endif /* ADRENO_PM4_XML */
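
A hypothetical sketch of a direct (inline-payload) CP_LOAD_STATE emit
using the helpers above, loading sz dwords of fragment-shader
constants. The assumption that NUM_UNIT counts 2-dword units for
constants comes from userspace users of this packet, not from anything
in this header:

    OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
    OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
            CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
            CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_SHADER) |
            CP_LOAD_STATE_0_NUM_UNIT(sz / 2));
    OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
            CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
    /* ...followed by sz payload dwords via OUT_RING()... */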
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 00000000000..6f8396be431
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
+#ifndef DSI_XML
+#define DSI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum dsi_traffic_mode {
+ NON_BURST_SYNCH_PULSE = 0,
+ NON_BURST_SYNCH_EVENT = 1,
+ BURST_MODE = 2,
+};
+
+enum dsi_dst_format {
+ DST_FORMAT_RGB565 = 0,
+ DST_FORMAT_RGB666 = 1,
+ DST_FORMAT_RGB666_LOOSE = 2,
+ DST_FORMAT_RGB888 = 3,
+};
+
+enum dsi_rgb_swap {
+ SWAP_RGB = 0,
+ SWAP_RBG = 1,
+ SWAP_BGR = 2,
+ SWAP_BRG = 3,
+ SWAP_GRB = 4,
+ SWAP_GBR = 5,
+};
+
+enum dsi_cmd_trigger {
+ TRIGGER_NONE = 0,
+ TRIGGER_TE = 2,
+ TRIGGER_SW = 4,
+ TRIGGER_SW_SEOF = 5,
+ TRIGGER_SW_TE = 6,
+};
+
+#define DSI_IRQ_CMD_DMA_DONE 0x00000001
+#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
+#define DSI_IRQ_CMD_MDP_DONE 0x00000100
+#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
+#define DSI_IRQ_VIDEO_DONE 0x00010000
+#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_ERROR 0x01000000
+#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_CTRL 0x00000000
+#define DSI_CTRL_ENABLE 0x00000001
+#define DSI_CTRL_VID_MODE_EN 0x00000002
+#define DSI_CTRL_CMD_MODE_EN 0x00000004
+#define DSI_CTRL_LANE0 0x00000010
+#define DSI_CTRL_LANE1 0x00000020
+#define DSI_CTRL_LANE2 0x00000040
+#define DSI_CTRL_LANE3 0x00000080
+#define DSI_CTRL_CLK_EN 0x00000100
+#define DSI_CTRL_ECC_CHECK 0x00100000
+#define DSI_CTRL_CRC_CHECK 0x01000000
+
+#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
+#define DSI_STATUS0_DSI_BUSY 0x00000010
+
+#define REG_DSI_FIFO_STATUS 0x00000008
+
+#define REG_DSI_VID_CFG0 0x0000000c
+#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
+#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
+static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
+}
+#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
+#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+{
+ return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
+#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
+static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
+{
+ return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
+}
+#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
+#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
+#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
+#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
+#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
+#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
+
+#define REG_DSI_VID_CFG1 0x0000001c
+#define DSI_VID_CFG1_R_SEL 0x00000010
+#define DSI_VID_CFG1_G_SEL 0x00000100
+#define DSI_VID_CFG1_B_SEL 0x00001000
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
+}
+#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
+}
+
+#define REG_DSI_ACTIVE_H 0x00000020
+#define DSI_ACTIVE_H_START__MASK 0x00000fff
+#define DSI_ACTIVE_H_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
+}
+#define DSI_ACTIVE_H_END__MASK 0x0fff0000
+#define DSI_ACTIVE_H_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_V 0x00000024
+#define DSI_ACTIVE_V_START__MASK 0x00000fff
+#define DSI_ACTIVE_V_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
+}
+#define DSI_ACTIVE_V_END__MASK 0x0fff0000
+#define DSI_ACTIVE_V_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
+}
+
+#define REG_DSI_TOTAL 0x00000028
+#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACTIVE_HSYNC 0x0000002c
+#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
+}
+#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC 0x00000034
+#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
+#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
+
+#define REG_DSI_CMD_CFG0 0x0000003c
+
+#define REG_DSI_CMD_CFG1 0x00000040
+
+#define REG_DSI_DMA_BASE 0x00000044
+
+#define REG_DSI_DMA_LEN 0x00000048
+
+#define REG_DSI_ACK_ERR_STATUS 0x00000064
+
+static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+#define REG_DSI_TRIG_CTRL 0x00000080
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
+static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
+static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_TE 0x80000000
+
+#define REG_DSI_TRIG_DMA 0x0000008c
+
+#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+
+#define REG_DSI_TIMEOUT_STATUS 0x000000bc
+
+#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
+}
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
+}
+
+#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
+#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
+#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
+
+#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+
+#define REG_DSI_ERR_INT_MASK0 0x00000108
+
+#define REG_DSI_INTR_CTRL 0x0000010c
+
+#define REG_DSI_RESET 0x00000114
+
+#define REG_DSI_CLK_CTRL 0x00000118
+
+#define REG_DSI_PHY_RESET 0x00000128
+
+#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
+#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
+
+#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
+
+#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
+
+#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
+
+#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
+
+#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
+
+#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
+
+#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
+
+#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
+
+#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
+
+#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
+
+#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
+
+#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
+
+#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
+
+#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
+
+#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
+
+#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
+
+#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
+
+#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
+
+#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
+
+#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
+
+#define REG_DSI_PHY_PLL_STATUS 0x00000280
+#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
+
+#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
+
+#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
+
+#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
+
+#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
+
+#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
+
+#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
+
+#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
+
+#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
+
+#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
+
+#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
+
+#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
+#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
+
+static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
+
+#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
+
+#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
+
+#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
+
+#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
+
+#define REG_DSI_8960_PHY_CTRL_0 0x00000470
+
+#define REG_DSI_8960_PHY_CTRL_1 0x00000474
+
+#define REG_DSI_8960_PHY_CTRL_2 0x00000478
+
+#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
+
+#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
+
+#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
+
+#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
+
+#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
+
+#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
+
+#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
+
+#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
+
+#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
+
+#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
+
+#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
+
+#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
+
+#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
+#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+
+
+#endif /* DSI_XML */
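
The same composition pattern applies to the DSI registers. As a hypothetical sketch (not part of the patch), a video-mode config word for an RGB888 non-burst panel could be assembled as:

static inline uint32_t dsi_vid_cfg0_rgb888(void)
{
	/* virtual channel 0, 24bpp, non-burst with sync pulses,
	 * low-power stop state during BLLP */
	return DSI_VID_CFG0_VIRT_CHANNEL(0) |
		DSI_VID_CFG0_DST_FORMAT(DST_FORMAT_RGB888) |
		DSI_VID_CFG0_TRAFFIC_MODE(NON_BURST_SYNCH_PULSE) |
		DSI_VID_CFG0_BLLP_POWER_STOP;
}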
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 00000000000..aefc1b8feae
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
+#ifndef MMSS_CC_XML
+#define MMSS_CC_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mmss_cc_clk {
+ CLK = 0,
+ PCLK = 1,
+};
+
+#define REG_MMSS_CC_AHB 0x00000008
+
+static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
+{
+ switch (idx) {
+ case CLK: return 0x0000004c;
+ case PCLK: return 0x00000130;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+
+static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
+#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
+#define MMSS_CC_CLK_CC_MND_EN 0x00000020
+#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
+#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
+static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
+}
+#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
+#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
+#define MMSS_CC_CLK_MD_D__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
+}
+#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
+#define MMSS_CC_CLK_MD_M__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
+#define MMSS_CC_CLK_NS_SRC__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
+}
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
+static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
+}
+#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
+#define MMSS_CC_CLK_NS_VAL__SHIFT 24
+static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
+}
+
+
+#endif /* MMSS_CC_XML */
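
Unlike the flat registers above, the MMSS clock-controller registers are indexed by enum mmss_cc_clk through __offset_CLK(), so one accessor serves both clock banks. Illustrative resolved offsets (not part of the patch):

/* __offset_CLK(CLK) == 0x4c and __offset_CLK(PCLK) == 0x130, so: */
uint32_t cc = REG_MMSS_CC_CLK_CC(PCLK);	/* 0x130 */
uint32_t md = REG_MMSS_CC_CLK_MD(PCLK);	/* 0x134 */
uint32_t ns = REG_MMSS_CC_CLK_NS(PCLK);	/* 0x138 */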
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 00000000000..a225e8170b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
+#ifndef SFPB_XML
+#define SFPB_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_SFPB_CFG 0x00000058
+
+
+#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 00000000000..50d11df35b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+static struct platform_device *hdmi_pdev;
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
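+			/* DVI: pulse the HDMI bit for the enable write, then run with it cleared */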
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl = HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
+{
+ struct hdmi *hdmi = dev_id;
+
+ /* Process HPD: */
+ hdmi_connector_irq(hdmi->connector);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+	/* TODO: audio */
+
+ return IRQ_HANDLED;
+}
+
+void hdmi_destroy(struct kref *kref)
+{
+ struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (phy)
+ phy->funcs->destroy(phy);
+
+ if (hdmi->i2c)
+ hdmi_i2c_destroy(hdmi->i2c);
+
+ put_device(&hdmi->pdev->dev);
+}
+
+/* initialize the hdmi block (phy, i2c, bridge, connector, irq) */
+int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct hdmi *hdmi = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = hdmi_pdev;
+ struct hdmi_platform_config *config;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no hdmi device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ kref_init(&hdmi->refcount);
+
+ get_device(&pdev->dev);
+
+ hdmi->dev = dev;
+ hdmi->pdev = pdev;
+ hdmi->encoder = encoder;
+
+	/* not sure which phy maps to which msm SoC; some are probably missing */
+ if (config->phy_init)
+ hdmi->phy = config->phy_init(hdmi);
+ else
+ hdmi->phy = ERR_PTR(-ENXIO);
+
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(dev->dev, "failed to load phy: %d\n", ret);
+ hdmi->phy = NULL;
+ goto fail;
+ }
+
+ hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ goto fail;
+ }
+
+ hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
+ if (IS_ERR(hdmi->mvs))
+ hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
+ if (IS_ERR(hdmi->mvs)) {
+ ret = PTR_ERR(hdmi->mvs);
+ dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
+ if (IS_ERR(hdmi->mpp0))
+ hdmi->mpp0 = NULL;
+
+ hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(hdmi->clk)) {
+ ret = PTR_ERR(hdmi->clk);
+ dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
+ if (IS_ERR(hdmi->m_pclk)) {
+ ret = PTR_ERR(hdmi->m_pclk);
+ dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
+ if (IS_ERR(hdmi->s_pclk)) {
+ ret = PTR_ERR(hdmi->s_pclk);
+ dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->i2c = hdmi_i2c_init(hdmi);
+ if (IS_ERR(hdmi->i2c)) {
+ ret = PTR_ERR(hdmi->i2c);
+ dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+ hdmi->i2c = NULL;
+ goto fail;
+ }
+
+ hdmi->bridge = hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ ret = PTR_ERR(hdmi->bridge);
+ dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ hdmi->bridge = NULL;
+ goto fail;
+ }
+
+ hdmi->connector = hdmi_connector_init(hdmi);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ hdmi->connector = NULL;
+ goto fail;
+ }
+
+ hdmi->irq = platform_get_irq(pdev, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+ NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
+
+ encoder->bridge = hdmi->bridge;
+
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+ priv->connectors[priv->num_connectors++] = hdmi->connector;
+
+ return 0;
+
+fail:
+ if (hdmi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (hdmi->bridge)
+ hdmi->bridge->funcs->destroy(hdmi->bridge);
+ if (hdmi->connector)
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi_destroy(&hdmi->refcount);
+ }
+
+ return ret;
+}
+
+/*
+ * The hdmi device:
+ */
+
+static int hdmi_dev_probe(struct platform_device *pdev)
+{
+ static struct hdmi_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ if (cpu_is_apq8064()) {
+ config.phy_init = hdmi_phy_8960_init;
+ config.ddc_clk_gpio = 70;
+ config.ddc_data_gpio = 71;
+ config.hpd_gpio = 72;
+ config.pmic_gpio = 13 + NR_GPIO_IRQS;
+ } else if (cpu_is_msm8960()) {
+ config.phy_init = hdmi_phy_8960_init;
+ config.ddc_clk_gpio = 100;
+ config.ddc_data_gpio = 101;
+ config.hpd_gpio = 102;
+ config.pmic_gpio = -1;
+ } else if (cpu_is_msm8x60()) {
+ config.phy_init = hdmi_phy_8x60_init;
+ config.ddc_clk_gpio = 170;
+ config.ddc_data_gpio = 171;
+ config.hpd_gpio = 172;
+ config.pmic_gpio = -1;
+ }
+#endif
+ pdev->dev.platform_data = &config;
+ hdmi_pdev = pdev;
+ return 0;
+}
+
+static int hdmi_dev_remove(struct platform_device *pdev)
+{
+ hdmi_pdev = NULL;
+ return 0;
+}
+
+static struct platform_driver hdmi_driver = {
+ .probe = hdmi_dev_probe,
+ .remove = hdmi_dev_remove,
+ .driver.name = "hdmi_msm",
+};
+
+void __init hdmi_register(void)
+{
+ platform_driver_register(&hdmi_driver);
+}
+
+void __exit hdmi_unregister(void)
+{
+ platform_driver_unregister(&hdmi_driver);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 00000000000..2c2ec566394
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HDMI_H__
+#define __HDMI_H__
+
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "hdmi.xml.h"
+
+
+struct hdmi_phy;
+
+struct hdmi {
+ struct kref refcount;
+
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ void __iomem *mmio;
+
+ struct regulator *mvs; /* HDMI_5V */
+ struct regulator *mpp0; /* External 5V */
+
+ struct clk *clk;
+ struct clk *m_pclk;
+ struct clk *s_pclk;
+
+ struct hdmi_phy *phy;
+ struct i2c_adapter *i2c;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ /* the encoder we are hooked to (outside of hdmi block) */
+ struct drm_encoder *encoder;
+
+ bool hdmi_mode; /* are we in hdmi mode? */
+
+ int irq;
+};
+
+/* platform config data (i.e. from DT or pdata) */
+struct hdmi_platform_config {
+ struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
+ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+};
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+void hdmi_destroy(struct kref *kref);
+
+static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
+{
+ msm_writel(data, hdmi->mmio + reg);
+}
+
+static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->mmio + reg);
+}
+
+static inline struct hdmi *hdmi_reference(struct hdmi *hdmi)
+{
+ kref_get(&hdmi->refcount);
+ return hdmi;
+}
+
+static inline void hdmi_unreference(struct hdmi *hdmi)
+{
+ kref_put(&hdmi->refcount, hdmi_destroy);
+}
+
+/*
+ * The phy differs between SoCs (for example, 8960 vs 8x60), so the
+ * phy-related functions are split out and the correct implementation
+ * is loaded at runtime:
+ */
+
+struct hdmi_phy_funcs {
+ void (*destroy)(struct hdmi_phy *phy);
+ void (*reset)(struct hdmi_phy *phy);
+	void (*powerup)(struct hdmi_phy *phy, unsigned long pixclock);
+ void (*powerdown)(struct hdmi_phy *phy);
+};
+
+struct hdmi_phy {
+ const struct hdmi_phy_funcs *funcs;
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+
+/*
+ * hdmi bridge:
+ */
+
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
+
+/*
+ * hdmi connector:
+ */
+
+void hdmi_connector_irq(struct drm_connector *connector);
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
+
+/*
+ * i2c adapter for ddc:
+ */
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c);
+void hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+
+#endif /* __HDMI_H__ */
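
hdmi_read()/hdmi_write() above are thin mmio accessors, so register updates follow the usual read-modify-write shape. A hypothetical helper (hdmi_ctrl_set_encrypted is illustrative, not part of the patch) toggling the ENCRYPTED bit defined in hdmi.xml.h:

static inline void hdmi_ctrl_set_encrypted(struct hdmi *hdmi, bool on)
{
	uint32_t ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);

	if (on)
		ctrl |= HDMI_CTRL_ENCRYPTED;
	else
		ctrl &= ~HDMI_CTRL_ENCRYPTED;
	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
}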
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 00000000000..f5fa4865e05
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
+#ifndef HDMI_XML
+#define HDMI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum hdmi_hdcp_key_state {
+ NO_KEYS = 0,
+ NOT_CHECKED = 1,
+ CHECKING = 2,
+ KEYS_VALID = 3,
+ AKSV_INVALID = 4,
+ CHECKSUM_MISMATCH = 5,
+};
+
+enum hdmi_ddc_read_write {
+ DDC_WRITE = 0,
+ DDC_READ = 1,
+};
+
+enum hdmi_acr_cts {
+ ACR_NONE = 0,
+ ACR_32 = 1,
+ ACR_44 = 2,
+ ACR_48 = 3,
+};
+
+#define REG_HDMI_CTRL 0x00000000
+#define HDMI_CTRL_ENABLE 0x00000001
+#define HDMI_CTRL_HDMI 0x00000002
+#define HDMI_CTRL_ENCRYPTED 0x00000004
+
+#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
+#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
+
+#define REG_HDMI_ACR_PKT_CTRL 0x00000024
+#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
+#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
+#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
+#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
+static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
+static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
+
+#define REG_HDMI_VBI_PKT_CTRL 0x00000028
+#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
+#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
+#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
+#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
+#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
+#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
+
+#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
+#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
+#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+
+#define REG_HDMI_GEN_PKT_CTRL 0x00000034
+#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
+#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
+#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
+}
+
+#define REG_HDMI_GC 0x00000040
+#define HDMI_GC_MUTE 0x00000001
+
+#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
+#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
+#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
+
+static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
+
+#define REG_HDMI_GENERIC0_HDR 0x00000084
+
+static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
+
+#define REG_HDMI_GENERIC1_HDR 0x000000a4
+
+static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+
+static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+#define HDMI_ACR_0_CTS__MASK 0xfffff000
+#define HDMI_ACR_0_CTS__SHIFT 12
+static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
+{
+ return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
+}
+
+static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+#define HDMI_ACR_1_N__MASK 0xffffffff
+#define HDMI_ACR_1_N__SHIFT 0
+static inline uint32_t HDMI_ACR_1_N(uint32_t val)
+{
+ return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO0 0x000000e4
+#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
+#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
+}
+#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
+#define HDMI_AUDIO_INFO0_CC__SHIFT 8
+static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO1 0x000000e8
+#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
+#define HDMI_AUDIO_INFO1_CA__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
+}
+#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
+#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
+static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
+}
+#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
+
+#define REG_HDMI_HDCP_CTRL 0x00000110
+#define HDMI_HDCP_CTRL_ENABLE 0x00000001
+#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
+
+#define REG_HDMI_HDCP_INT_CTRL 0x00000118
+
+#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
+#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
+#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
+static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
+{
+ return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
+}
+
+#define REG_HDMI_HDCP_RESET 0x00000130
+#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+
+#define REG_HDMI_AUDIO_CFG 0x000001d0
+#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
+static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+}
+
+#define REG_HDMI_USEC_REFTIMER 0x00000208
+
+#define REG_HDMI_DDC_CTRL 0x0000020c
+#define HDMI_DDC_CTRL_GO 0x00000001
+#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
+#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
+#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
+static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_INT_CTRL 0x00000214
+#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
+#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
+#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
+
+#define REG_HDMI_DDC_SW_STATUS 0x00000218
+#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
+#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
+#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
+#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
+
+#define REG_HDMI_DDC_HW_STATUS 0x0000021c
+
+#define REG_HDMI_DDC_SPEED 0x00000220
+#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
+#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
+static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
+}
+#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
+#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
+static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
+}
+
+#define REG_HDMI_DDC_SETUP 0x00000224
+#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
+#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
+static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
+}
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
+#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
+}
+#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
+#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
+#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
+#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
+#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_DATA 0x00000238
+#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
+#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
+static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
+}
+#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
+#define HDMI_DDC_DATA_DATA__SHIFT 8
+static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
+}
+#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
+#define HDMI_DDC_DATA_INDEX__SHIFT 16
+static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
+}
+#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
+
+#define REG_HDMI_HPD_INT_STATUS 0x00000250
+#define HDMI_HPD_INT_STATUS_INT 0x00000001
+#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
+
+#define REG_HDMI_HPD_INT_CTRL 0x00000254
+#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
+#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
+#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
+#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
+#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
+#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
+
+#define REG_HDMI_HPD_CTRL 0x00000258
+#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
+#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
+static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
+}
+#define HDMI_HPD_CTRL_ENABLE 0x10000000
+
+#define REG_HDMI_DDC_REF 0x0000027c
+#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
+#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
+#define HDMI_DDC_REF_REFTIMER__SHIFT 0
+static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
+{
+ return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
+}
+
+#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define REG_HDMI_TOTAL 0x000002c0
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define HDMI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
+}
+#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
+static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_FRAME_CTRL 0x000002c8
+#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
+#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
+#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
+#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+
+#define REG_HDMI_PHY_CTRL 0x000002d4
+#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
+#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
+#define HDMI_PHY_CTRL_SW_RESET 0x00000004
+#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
+
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
+#define REG_HDMI_8x60_PHY_REG0 0x00000300
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
+static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG1 0x00000304
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
+static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
+}
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
+static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG2 0x00000308
+#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
+#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
+#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
+#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
+
+#define REG_HDMI_8x60_PHY_REG3 0x0000030c
+#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
+
+#define REG_HDMI_8x60_PHY_REG4 0x00000310
+
+#define REG_HDMI_8x60_PHY_REG5 0x00000314
+
+#define REG_HDMI_8x60_PHY_REG6 0x00000318
+
+#define REG_HDMI_8x60_PHY_REG7 0x0000031c
+
+#define REG_HDMI_8x60_PHY_REG8 0x00000320
+
+#define REG_HDMI_8x60_PHY_REG9 0x00000324
+
+#define REG_HDMI_8x60_PHY_REG10 0x00000328
+
+#define REG_HDMI_8x60_PHY_REG11 0x0000032c
+
+#define REG_HDMI_8x60_PHY_REG12 0x00000330
+#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
+#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
+#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
+
+#define REG_HDMI_8960_PHY_REG0 0x00000400
+
+#define REG_HDMI_8960_PHY_REG1 0x00000404
+
+#define REG_HDMI_8960_PHY_REG2 0x00000408
+
+#define REG_HDMI_8960_PHY_REG3 0x0000040c
+
+#define REG_HDMI_8960_PHY_REG4 0x00000410
+
+#define REG_HDMI_8960_PHY_REG5 0x00000414
+
+#define REG_HDMI_8960_PHY_REG6 0x00000418
+
+#define REG_HDMI_8960_PHY_REG7 0x0000041c
+
+#define REG_HDMI_8960_PHY_REG8 0x00000420
+
+#define REG_HDMI_8960_PHY_REG9 0x00000424
+
+#define REG_HDMI_8960_PHY_REG10 0x00000428
+
+#define REG_HDMI_8960_PHY_REG11 0x0000042c
+
+#define REG_HDMI_8960_PHY_REG12 0x00000430
+
+
+#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 00000000000..5a8ee3473cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_bridge {
+ struct drm_bridge base;
+
+ struct hdmi *hdmi;
+
+	unsigned long pixclock;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
+static void hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ hdmi_unreference(hdmi_bridge->hdmi);
+ drm_bridge_cleanup(bridge);
+ kfree(hdmi_bridge);
+}
+
+static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power up");
+ phy->funcs->powerup(phy, hdmi_bridge->pixclock);
+ hdmi_set_mode(hdmi, true);
+}
+
+static void hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power down");
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->powerdown(phy);
+}
+
+static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ hdmi_bridge->pixclock = mode->clock * 1000;
+
+ hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
+
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DBG("frame_ctrl=%08x", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+	/* TODO: until we have audio support, muting is probably safest: */
+ if (hdmi->hdmi_mode)
+ hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
+}
+
+static const struct drm_bridge_funcs hdmi_bridge_funcs = {
+ .pre_enable = hdmi_bridge_pre_enable,
+ .enable = hdmi_bridge_enable,
+ .disable = hdmi_bridge_disable,
+ .post_disable = hdmi_bridge_post_disable,
+ .mode_set = hdmi_bridge_mode_set,
+ .destroy = hdmi_bridge_destroy,
+};
+
+
+/* initialize bridge */
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct hdmi_bridge *hdmi_bridge;
+ int ret;
+
+ hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
+ if (!hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_bridge->hdmi = hdmi_reference(hdmi);
+
+ bridge = &hdmi_bridge->base;
+
+ drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
+
+ return bridge;
+
+fail:
+ if (bridge)
+ hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
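
To make the sync arithmetic in hdmi_bridge_mode_set() concrete, a worked example assuming CEA mode 16, 1080p60 (illustrative numbers, not from the patch):

/*
 * htotal=2200 hdisplay=1920 hsync_start=2008
 * vtotal=1125 vdisplay=1080 vsync_start=1084
 *
 *   hstart = 2200 - 2008            = 192
 *   hend   = 2200 - 2008 + 1920     = 2112
 *   vstart = 1125 - 1084 - 1        = 40
 *   vend   = 1125 - 1084 + 1080 - 1 = 1120
 *
 * i.e. the active window is expressed relative to the start of
 * hsync/vsync, and REG_HDMI_TOTAL is written with the totals minus
 * one (2199 and 1124).
 */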
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 00000000000..823eee521a3
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+
+#include "hdmi.h"
+
+struct hdmi_connector {
+ struct drm_connector base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
+
+static int gpio_config(struct hdmi *hdmi, bool on)
+{
+ struct drm_device *dev = hdmi->dev;
+ struct hdmi_platform_config *config =
+ hdmi->pdev->dev.platform_data;
+ int ret;
+
+ if (on) {
+ ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+ goto error1;
+ }
+ ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+ goto error2;
+ }
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error3;
+ }
+ if (config->pmic_gpio != -1) {
+ ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+ goto error4;
+ }
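+			/* assumption: mux-select low routes HDMI out (the
+			 * disable path below parks it high again):
+			 */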
+ gpio_set_value_cansleep(config->pmic_gpio, 0);
+ }
+ DBG("gpio on");
+ } else {
+ gpio_free(config->ddc_clk_gpio);
+ gpio_free(config->ddc_data_gpio);
+ gpio_free(config->hpd_gpio);
+
+ if (config->pmic_gpio != -1) {
+ gpio_set_value_cansleep(config->pmic_gpio, 1);
+ gpio_free(config->pmic_gpio);
+ }
+ DBG("gpio off");
+ }
+
+ return 0;
+
+error4:
+ gpio_free(config->hpd_gpio);
+error3:
+ gpio_free(config->ddc_data_gpio);
+error2:
+ gpio_free(config->ddc_clk_gpio);
+error1:
+ return ret;
+}
+
+static int hpd_enable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ struct hdmi_phy *phy = hdmi->phy;
+ uint32_t hpd_ctrl;
+ int ret;
+
+ ret = gpio_config(hdmi, true);
+ if (ret) {
+ dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->clk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->m_pclk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->s_pclk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ if (hdmi->mpp0)
+ ret = regulator_enable(hdmi->mpp0);
+ if (!ret)
+ ret = regulator_enable(hdmi->mvs);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->reset(phy);
+ hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int hpd_disable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ int ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ hdmi_set_mode(hdmi, false);
+
+ if (hdmi->mpp0)
+ ret = regulator_disable(hdmi->mpp0);
+ if (!ret)
+ ret = regulator_disable(hdmi->mvs);
+ if (ret) {
+		dev_err(dev->dev, "failed to disable regulators: %d\n", ret);
+ goto fail;
+ }
+
+ clk_disable_unprepare(hdmi->clk);
+ clk_disable_unprepare(hdmi->m_pclk);
+ clk_disable_unprepare(hdmi->s_pclk);
+
+ ret = gpio_config(hdmi, false);
+ if (ret) {
+ dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void hdmi_connector_irq(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
+
+ /* ack the irq: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+
+ drm_helper_hpd_irq_event(connector->dev);
+
+		/* re-arm to detect disconnect if we are connected, or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!detected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+ }
+}
+
+static enum drm_connector_status hdmi_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ uint32_t hpd_int_status;
+ int retry = 20;
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+	/* The sense signal is sometimes momentarily de-asserted; don't let
+	 * that trick us into thinking the monitor is gone:
+	 */
+ while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+ mdelay(10);
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ DBG("status=%08x", hpd_int_status);
+ }
+
+ return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+
+	hpd_disable(hdmi_connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ hdmi_unreference(hdmi_connector->hdmi);
+
+ kfree(hdmi_connector);
+}
+
+static int hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+ int ret = 0;
+
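+	/* DDC only works while the controller is enabled (hdmi_i2c_xfer
+	 * warns otherwise), so enable it for the EDID read and restore the
+	 * previous value afterwards:
+	 */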
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
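+	/* mode->clock is in kHz; only accept the mode if the KMS clock code
+	 * can produce the requested pixel clock exactly:
+	 */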
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_connector->hdmi->encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
+static struct drm_encoder *
+hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ return hdmi_connector->hdmi->encoder;
+}
+
+static const struct drm_connector_funcs hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = hdmi_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+ .get_modes = hdmi_connector_get_modes,
+ .mode_valid = hdmi_connector_mode_valid,
+ .best_encoder = hdmi_connector_best_encoder,
+};
+
+/* initialize connector */
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
+{
+ struct drm_connector *connector = NULL;
+ struct hdmi_connector *hdmi_connector;
+ int ret;
+
+ hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
+ if (!hdmi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_connector->hdmi = hdmi_reference(hdmi);
+
+ connector = &hdmi_connector->base;
+
+ drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ ret = hpd_enable(hdmi_connector);
+ if (ret) {
+ dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
+ drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+
+ return connector;
+
+fail:
+ if (connector)
+ hdmi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 00000000000..f4ab7f70fed
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_i2c_adapter {
+ struct i2c_adapter base;
+ struct hdmi *hdmi;
+ bool sw_done;
+ wait_queue_head_t ddc_event;
+};
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SW_STATUS_RESET);
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SOFT_RESET);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ HDMI_DDC_SPEED_THRESHOLD(2) |
+ HDMI_DDC_SPEED_PRESCALE(10));
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 27us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(27));
+}
+
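+/* Ack any stale software-done interrupt until the status bit clears: */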
+static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ uint32_t retry = 0xffff;
+ uint32_t ddc_int_ctrl;
+
+ do {
+ --retry;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+ HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+ if (!retry) {
+ dev_err(dev->dev, "timeout waiting for DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ hdmi_i2c->sw_done = false;
+
+ return 0;
+}
+
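+/* The DDC engine exposes four transaction slots (and four NACK status
+ * bits), so batch at most four i2c msgs per transfer:
+ */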
+#define MAX_TRANSACTIONS 4
+
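+/* Check for (and ack) the software-done interrupt; the result is cached in
+ * hdmi_i2c->sw_done because the ack clears the hardware status bit:
+ */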
+static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ if (!hdmi_i2c->sw_done) {
+ uint32_t ddc_int_ctrl;
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
+ (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
+ hdmi_i2c->sw_done = true;
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK);
+ }
+ }
+
+ return hdmi_i2c->sw_done;
+}
+
+static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ static const uint32_t nack[] = {
+ HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
+ HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
+ };
+ int indices[MAX_TRANSACTIONS];
+ int ret, i, j, index = 0;
+ uint32_t ddc_status, ddc_data, i2c_trans;
+
+ num = min(num, MAX_TRANSACTIONS);
+
+ WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
+ if (num == 0)
+ return num;
+
+ init_ddc(hdmi_i2c);
+
+ ret = ddc_clear_irq(hdmi_i2c);
+ if (ret)
+ return ret;
+
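+	/* All payloads share one data FIFO: queue each message's address
+	 * byte (plus write data), and remember in indices[] where its read
+	 * data will start so we can seek back to it afterwards:
+	 */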
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+ uint32_t raw_addr = p->addr << 1;
+
+ if (p->flags & I2C_M_RD)
+ raw_addr |= 1;
+
+ ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+
+ if (i == 0) {
+ ddc_data |= HDMI_DDC_DATA_INDEX(0) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+
+ indices[i] = index;
+
+ if (p->flags & I2C_M_RD) {
+ index += p->len;
+ } else {
+ for (j = 0; j < p->len; j++) {
+ ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+ }
+ }
+
+ i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+ HDMI_I2C_TRANSACTION_REG_RW(
+ (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
+ HDMI_I2C_TRANSACTION_REG_START;
+
+ if (i == (num - 1))
+ i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
+
+ hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
+ }
+
+ /* trigger the transfer: */
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
+ HDMI_DDC_CTRL_GO);
+
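+	/* wait up to 250ms (HZ/4) for the SW_DONE interrupt: */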
+ ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ dev_warn(dev->dev, "DDC timeout: %d\n", ret);
+ DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ return ret;
+ }
+
+ ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
+
+ /* read back results of any read transactions: */
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+
+ if (!(p->flags & I2C_M_RD))
+ continue;
+
+ /* check for NACK: */
+ if (ddc_status & nack[i]) {
+ DBG("ddc_status=%08x", ddc_status);
+ break;
+ }
+
+ ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
+ HDMI_DDC_DATA_INDEX(indices[i]) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+
+ /* discard first byte: */
+ hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+
+ for (j = 0; j < p->len; j++) {
+ ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+ p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
+ }
+ }
+
+ return i;
+}
+
+static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hdmi_i2c_algorithm = {
+ .master_xfer = hdmi_i2c_xfer,
+ .functionality = hdmi_i2c_func,
+};
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+
+ if (sw_done(hdmi_i2c))
+ wake_up_all(&hdmi_i2c->ddc_event);
+}
+
+void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ i2c_del_adapter(i2c);
+ kfree(hdmi_i2c);
+}
+
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
+{
+ struct drm_device *dev = hdmi->dev;
+ struct hdmi_i2c_adapter *hdmi_i2c;
+ struct i2c_adapter *i2c = NULL;
+ int ret;
+
+ hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
+ if (!hdmi_i2c) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ i2c = &hdmi_i2c->base;
+
+ hdmi_i2c->hdmi = hdmi;
+ init_waitqueue_head(&hdmi_i2c->ddc_event);
+
+ i2c->owner = THIS_MODULE;
+ i2c->class = I2C_CLASS_DDC;
+ snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
+ i2c->dev.parent = &hdmi->pdev->dev;
+ i2c->algo = &hdmi_i2c_algorithm;
+
+ ret = i2c_add_adapter(i2c);
+ if (ret) {
+ dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+ goto fail;
+ }
+
+ return i2c;
+
+fail:
+ if (i2c)
+ hdmi_i2c_destroy(i2c);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 00000000000..e5b7ed5b8f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8960 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ kfree(phy_8960);
+}
+
+static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
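+	/* SW_RESET_LOW indicates the reset polarity: assert reset in
+	 * whichever direction is active, hold it, then de-assert below:
+	 */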
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
+		unsigned long pixclock)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
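+	/* Fixed PHY init sequence; the register values are opaque vendor
+	 * settings (presumably taken from the downstream driver):
+	 */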
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
+}
+
+static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
+ .destroy = hdmi_phy_8960_destroy,
+ .reset = hdmi_phy_8960_reset,
+ .powerup = hdmi_phy_8960_powerup,
+ .powerdown = hdmi_phy_8960_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8960 *phy_8960;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
+ if (!phy_8960) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8960->base;
+
+ phy->funcs = &hdmi_phy_8960_funcs;
+
+ phy_8960->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8960_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 00000000000..391433c1af7
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x60 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
+
+static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ kfree(phy_8x60);
+}
+
+static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+}
+
+static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
+		unsigned long pixclock)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+	/* De-serializer delay D/C for non-loopback mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
+ HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+
+ if (pixclock == 27000000) {
+ /* video_format == HDMI_VFRMT_720x480p60_16_9 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+ }
+
+ /* No matter what, start from the power down mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PowerGen on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PLL power on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Write to HIGH after PLL power down de-assert: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
+ HDMI_8x60_PHY_REG3_PLL_ENABLE);
+
+ /* ASIC power on; PHY REG9 = 0 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+
+	/* Enable PLL lock detect (it goes high once the PLL locks) and
+	 * enable the re-timing logic:
+	 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+
+ /* Drivers are on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* If the RX detector is needed: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
+
+ /* If we want to use lock enable based on counting: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+ HDMI_8x60_PHY_REG12_FORCE_LOCK);
+}
+
+static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+ /* Assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ HDMI_PHY_CTRL_SW_RESET);
+ udelay(10);
+ /* De-assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
+ /* Turn off Driver */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+ udelay(10);
+ /* Disable PLL */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
+ /* Power down PHY, but keep RX-sense: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
+ .destroy = hdmi_phy_8x60_destroy,
+ .reset = hdmi_phy_8x60_reset,
+ .powerup = hdmi_phy_8x60_powerup,
+ .powerdown = hdmi_phy_8x60_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x60 *phy_8x60;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
+ if (!phy_8x60) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x60->base;
+
+ phy->funcs = &hdmi_phy_8x60_funcs;
+
+ phy_8x60->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x60_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 00000000000..bee36363bcd
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
+#ifndef QFPROM_XML
+#define QFPROM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
+#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
+#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
+
+
+#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 00000000000..bbeeebe2db5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
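+/* Each register field FOO below comes with FOO__MASK / FOO__SHIFT and an
+ * inline helper FOO(val) that shifts and masks a value into place; the
+ * 'mpd4' spelling in some enum names is inherited from the generating XML.
+ */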
+enum mpd4_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mpd4_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+enum mpd4_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mpd4_pipe {
+ VG1 = 0,
+ VG2 = 1,
+ RGB1 = 2,
+ RGB2 = 3,
+ RGB3 = 4,
+ VG3 = 5,
+ VG4 = 6,
+};
+
+enum mpd4_mixer {
+ MIXER0 = 0,
+ MIXER1 = 1,
+ MIXER2 = 2,
+};
+
+enum mpd4_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+};
+
+enum mdp4_intf {
+ INTF_LCDC_DTV = 0,
+ INTF_DSI_VIDEO = 1,
+ INTF_DSI_CMD = 2,
+ INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+ CURSOR_ARGB = 1,
+ CURSOR_XRGB = 2,
+};
+
+enum mdp4_dma {
+ DMA_P = 0,
+ DMA_S = 1,
+ DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
+#define MDP4_IRQ_DMA_S_DONE 0x00000004
+#define MDP4_IRQ_DMA_E_DONE 0x00000008
+#define MDP4_IRQ_DMA_P_DONE 0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
+#define REG_MDP4_VERSION 0x00000000
+#define MDP4_VERSION_MINOR__MASK 0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK 0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT 24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP4_OVLP0_KICK 0x00000004
+
+#define REG_MDP4_OVLP1_KICK 0x00000008
+
+#define REG_MDP4_OVLP2_KICK 0x000000d0
+
+#define REG_MDP4_DMA_P_KICK 0x0000000c
+
+#define REG_MDP4_DMA_S_KICK 0x00000010
+
+#define REG_MDP4_DMA_E_KICK 0x00000014
+
+#define REG_MDP4_DISP_STATUS 0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
+
+#define REG_MDP4_RESET_STATUS 0x0000003c
+
+#define REG_MDP4_READ_CNFG 0x0000004c
+
+#define REG_MDP4_INTR_ENABLE 0x00000050
+
+#define REG_MDP4_INTR_STATUS 0x00000054
+
+#define REG_MDP4_INTR_CLEAR 0x00000058
+
+#define REG_MDP4_EBI2_LCD0 0x00000060
+
+#define REG_MDP4_EBI2_LCD1 0x00000064
+
+#define REG_MDP4_PORTMAP_MODE 0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0 0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1 0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR 0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00010000;
+ case 1: return 0x00018000;
+ case 2: return 0x00088000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000104;
+ case 1: return 0x00000124;
+ case 2: return 0x00000144;
+ case 3: return 0x00000160;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00001004;
+ case 1: return 0x00001404;
+ case 2: return 0x00001804;
+ case 3: return 0x00001b84;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+#define REG_MDP4_DMA_P_OP_MODE 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
+
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+ switch (idx) {
+ case DMA_P: return 0x00090000;
+ case DMA_S: return 0x000a0000;
+ case DMA_E: return 0x000b0000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT 8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
+#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
+
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+ return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
+
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
+
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+#define REG_MDP4_LCDC 0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE 0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DTV 0x000d0000
+
+#define REG_MDP4_DTV_ENABLE 0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DSI 0x000e0000
+
+#define REG_MDP4_DSI_ENABLE 0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 00000000000..de6bea297cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp4_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ int id;
+ int ovlp;
+ enum mdp4_dma dma;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ struct {
+ spinlock_t lock;
+ bool stale;
+ uint32_t width, height;
+
+		/* next cursor to scan out: */
+ uint32_t next_iova;
+ struct drm_gem_object *next_bo;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ } cursor;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct work_struct pageflip_work;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp4_irq vblank;
+ struct mdp4_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
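+/* Swap out the fb we hold a scanout reference to: queue the old fb on
+ * unref_fb_work so its reference is only dropped after scanout has moved
+ * on, and for synchronous updates enable vblank so the queued work gets
+ * committed from the vblank irq.
+ */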
+static void update_fb(struct drm_crtc *crtc, bool async,
+ struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp4_crtc->base.fb = new_fb;
+ mdp4_crtc->fb = new_fb;
+
+ if (!async) {
+ /* enable vblank to pick up the old_fb */
+ mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+ }
+}
+
+static void complete_flip(struct drm_crtc *crtc, bool canceled)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp4_crtc->event;
+ if (event) {
+ mdp4_crtc->event = NULL;
+ if (canceled)
+ event->base.destroy(&event->base);
+ else
+ drm_send_vblank_event(dev, mdp4_crtc->id, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t flush = 0;
+
+ flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
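+/* Deferred half of a page-flip: queued via msm_gem_queue_inactive_work()
+ * from mdp4_crtc_page_flip() so it runs once the new scanout bo is idle;
+ * it points the plane at the new fb, flushes, and arms the vblank irq
+ * which will complete the flip and send the event.
+ */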
+static void pageflip_worker(struct work_struct *work)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, pageflip_work);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+
+ mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
+ crtc_flush(crtc);
+
+ /* enable vblank to complete flip: */
+ mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_fb_work);
+ struct drm_device *dev = mdp4_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_cursor_work);
+ struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+
+ msm_gem_put_iova(val, mdp4_kms->id);
+ drm_gem_object_unreference_unlocked(val);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+ kfree(mdp4_crtc);
+}
+
+static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp4_crtc->name, mode);
+
+ if (enabled != mdp4_crtc->enabled) {
+ if (enabled) {
+ mdp4_enable(mdp4_kms);
+ mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+ } else {
+ mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+ }
+ mdp4_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ int i, ovlp = mdp4_crtc->ovlp;
+ uint32_t mixer_cfg = 0;
+
+	/*
+	 * This would probably also need to be triggered by any attached
+	 * plane when it changes; for now, since we are only using a single
+	 * private plane, the configuration is hard-coded:
+	 */
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+ for (i = 0; i < 4; i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
+ MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+ }
+
+ /* TODO single register for all CRTCs, so this won't work properly
+ * when multiple CRTCs are active..
+ */
+ switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
+ case VG1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+		WARN(1, "invalid pipe\n");
+ break;
+ }
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ int ret, ovlp = mdp4_crtc->ovlp;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp4_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+ MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+ /* take data from pipe: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
+ crtc->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+ MDP4_DMA_DST_SIZE_WIDTH(0) |
+ MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+ MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
+ crtc->fb->pitches[0]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
+ update_fb(crtc, false, crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
+ if (dma == DMA_E) {
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+ }
+
+ return 0;
+}
+
+static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s", mdp4_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp4_enable(get_kms(crtc));
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clk's that we got in prepare: */
+ mdp4_disable(get_kms(crtc));
+}
+
+static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane = mdp4_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+
+ update_fb(crtc, false, crtc->fb);
+
+ return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
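+/* Note: only one flip may be outstanding at a time (-EBUSY otherwise), and
+ * no register is touched here -- the hardware update happens later in
+ * pageflip_worker(), once the new fb's bo is no longer active.
+ */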
+static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+
+ if (mdp4_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ mdp4_crtc->event = event;
+ update_fb(crtc, true, new_fb);
+
+ return msm_gem_queue_inactive_work(obj,
+ &mdp4_crtc->pageflip_work);
+}
+
+static int mdp4_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+	/* XXX */
+ return -EINVAL;
+}
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* Called from IRQ to update cursor-related registers (if needed). The
+ * cursor registers, other than the x/y position, appear not to be double
+ * buffered, and changing them anywhere other than from vblank seems to
+ * trigger underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ if (mdp4_crtc->cursor.stale) {
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+ struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+ uint32_t iova = mdp4_crtc->cursor.next_iova;
+
+ if (next_bo) {
+			/* take an obj ref + iova ref when we start scanning out: */
+ drm_gem_object_reference(next_bo);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+
+ /* enable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+ MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+ MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+ MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+ } else {
+ /* disable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+ }
+
+		/* and drop the iova ref + obj ref when done scanning out: */
+ if (prev_bo)
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+ mdp4_crtc->cursor.scanout_bo = next_bo;
+ mdp4_crtc->cursor.stale = false;
+ }
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *cursor_bo, *old_bo;
+ unsigned long flags;
+ uint32_t iova;
+ int ret;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (handle) {
+ cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+ } else {
+ cursor_bo = NULL;
+ }
+
+ if (cursor_bo) {
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ if (ret)
+ goto fail;
+ } else {
+ iova = 0;
+ }
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ old_bo = mdp4_crtc->cursor.next_bo;
+ mdp4_crtc->cursor.next_bo = cursor_bo;
+ mdp4_crtc->cursor.next_iova = iova;
+ mdp4_crtc->cursor.width = width;
+ mdp4_crtc->cursor.height = height;
+ mdp4_crtc->cursor.stale = true;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ if (old_bo) {
+ /* drop our previous reference: */
+ msm_gem_put_iova(old_bo, mdp4_kms->id);
+ drm_gem_object_unreference_unlocked(old_bo);
+ }
+
+ return 0;
+
+fail:
+ drm_gem_object_unreference_unlocked(cursor_bo);
+ return ret;
+}
+
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(x) |
+ MDP4_DMA_CURSOR_POS_Y(y));
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp4_crtc_destroy,
+ .page_flip = mdp4_crtc_page_flip,
+ .set_property = mdp4_crtc_set_property,
+ .cursor_set = mdp4_crtc_cursor_set,
+ .cursor_move = mdp4_crtc_cursor_move,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+ .dpms = mdp4_crtc_dpms,
+ .mode_fixup = mdp4_crtc_mode_fixup,
+ .mode_set = mdp4_crtc_mode_set,
+ .prepare = mdp4_crtc_prepare,
+ .commit = mdp4_crtc_commit,
+ .mode_set_base = mdp4_crtc_mode_set_base,
+ .load_lut = mdp4_crtc_load_lut,
+};
+
+static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ update_cursor(crtc);
+ complete_flip(crtc, false);
+ mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+
+ drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+}
+
+static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ return mdp4_crtc->vblank.irqmask;
+}
+
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
+{
+ complete_flip(crtc, true);
+}
+
+/* set dma config, i.e. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t intf_sel;
+
+ intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+ switch (mdp4_crtc->dma) {
+ case DMA_P:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+ break;
+ case DMA_S:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+ break;
+ case DMA_E:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+ break;
+ }
+
+ if (intf == INTF_DSI_VIDEO) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ mdp4_crtc->mixer = 0;
+ } else if (intf == INTF_DSI_CMD) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+ mdp4_crtc->mixer = 0;
+	} else if (intf == INTF_LCDC_DTV) {
+ mdp4_crtc->mixer = 1;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+static const char *dma_names[] = {
+ "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp4_crtc *mdp4_crtc;
+ int ret;
+
+ mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+ if (!mdp4_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp4_crtc->base;
+
+ mdp4_crtc->plane = plane;
+ mdp4_crtc->plane->crtc = crtc;
+
+ mdp4_crtc->ovlp = ovlp_id;
+ mdp4_crtc->dma = dma_id;
+
+ mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+ mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+ mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+ mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+ snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+ dma_names[dma_id], ovlp_id);
+
+ spin_lock_init(&mdp4_crtc->cursor.lock);
+
+ ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret)
+ goto fail;
+
+	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+			"unref cursor", unref_cursor_worker);
+	if (ret)
+		goto fail;
+
+ INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
+
+ drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+ mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp4_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 00000000000..5e0dcae70ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <mach/clk.h>
+
+#include "mdp4_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+struct mdp4_dtv_encoder {
+ struct drm_encoder base;
+ struct clk *src_clk;
+ struct clk *hdmi_clk;
+ struct clk *mdp_clk;
+	unsigned long pixclock;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+/* not ironically named at all.. no, really.. */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ struct drm_device *dev = mdp4_dtv_encoder->base.dev;
+ struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
+
+ if (!dtv_pdata) {
+ dev_err(dev->dev, "could not find dtv pdata\n");
+ return;
+ }
+
+ if (dtv_pdata->bus_scale_table) {
+ mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
+ dtv_pdata->bus_scale_table);
+ DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
+ DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
+ if (dtv_pdata->lcdc_power_save)
+ dtv_pdata->lcdc_power_save(1);
+ }
+}
+
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
+ mdp4_dtv_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
+#endif
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ bs_fini(mdp4_dtv_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+ .destroy = mdp4_dtv_encoder_destroy,
+};
+
+static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp4_dtv_encoder->enabled)
+ return;
+
+ if (enabled) {
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ bs_set(mdp4_dtv_encoder, 1);
+
+ DBG("setting src_clk=%lu", pc);
+
+ ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ if (ret)
+ dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+		ret = clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+		if (ret)
+			dev_err(dev->dev, "failed to enable src_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+			dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+ } else {
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+		/*
+		 * Wait for a vsync so we know the ENABLE=0 latched before
+		 * the (connector) source of the vsyncs gets disabled;
+		 * otherwise we end up in a funny state if we re-enable
+		 * before the disable latches, with the result that some of
+		 * the setting changes for the new modeset (like the new
+		 * scanout buffer) don't latch properly.
+		 */
+ mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ bs_set(mdp4_dtv_encoder, 0);
+ }
+
+ mdp4_dtv_encoder->enabled = enabled;
+}
+
+static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
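+	/* Worked example (illustrative, CEA 1080p60: hdisplay=1920,
+	 * hsync_start=2008, htotal=2200, vdisplay=1080, vsync_start=1084,
+	 * vsync_end=1089, vtotal=1125): hsync_start_x = 2200 - 2008 = 192,
+	 * hsync_end_x = 2200 - 88 - 1 = 2111, vsync_period = 1125 * 2200 =
+	 * 2475000 pixel clocks, display_v_start = 41 * 2200 = 90200 and
+	 * display_v_end = 2475000 - 8800 - 1 = 2466199.
+	 */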
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+ MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+ MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+ MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+ MDP4_DTV_ACTIVE_HCTL_START(0) |
+ MDP4_DTV_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+{
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+ .dpms = mdp4_dtv_encoder_dpms,
+ .mode_fixup = mdp4_dtv_encoder_mode_fixup,
+ .mode_set = mdp4_dtv_encoder_mode_set,
+ .prepare = mdp4_dtv_encoder_prepare,
+ .commit = mdp4_dtv_encoder_commit,
+};
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+ int ret;
+
+ mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+ if (!mdp4_dtv_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dtv_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+ mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
+ if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
+ dev_err(dev->dev, "failed to get src_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+ if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+ dev_err(dev->dev, "failed to get hdmi_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+ dev_err(dev->dev, "failed to get mdp_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+ goto fail;
+ }
+
+ bs_init(mdp4_dtv_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dtv_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 00000000000..7b645f2e837
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ }
+
+#define BPC0A 0
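+
+/* Illustration: FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3)
+ * expands to { .base = { .pixel_format = DRM_FORMAT_RGB565 }, .bpc_a =
+ * BPC0A, .bpc_r = BPC5, .bpc_g = BPC6, .bpc_b = BPC5, .unpack = { 1, 0,
+ * 2, 0 }, ... .cpp = 2, .unpack_count = 3 } -- the token pasting is why
+ * the bpc columns below are bare numbers (and why BPC0A must be defined).
+ */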
+
+static const struct mdp4_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
+};
+
+const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp4_format *f = &formats[i];
+ if (f->base.pixel_format == format)
+ return &f->base;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 00000000000..5c6b7fca4ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+
+struct mdp4_irq_wait {
+ struct mdp4_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
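+/* Recompute the hw interrupt enable mask as the union of the vblank mask
+ * and the irqmask of every registered handler; caller must hold list_lock.
+ */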
+static void update_irq(struct mdp4_kms *mdp4_kms)
+{
+ struct mdp4_irq *irq;
+ uint32_t irqmask = mdp4_kms->vblank_mask;
+
+	assert_spin_locked(&list_lock);
+
+ list_for_each_entry(irq, &mdp4_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
+
+ INIT_LIST_HEAD(&mdp4_kms->irq_list);
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp4_irq_register(mdp4_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp4_irq *handler, *n;
+ unsigned long flags;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp4_kms->in_irq = false;
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_irq_wait *wait =
+ container_of(irq, struct mdp4_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
+{
+ struct mdp4_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp4_irq_register(mdp4_kms, &wait.irq);
+ wait_event(wait_event, (wait.count <= 0));
+ mdp4_irq_unregister(mdp4_kms, &wait.irq);
+}
+
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp4_kms->irq_list);
+ needs_update = !mdp4_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp4_kms);
+}
+
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp4_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp4_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 00000000000..5db5bbaedae
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+#include <mach/iommu.h>
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ uint32_t version, major, minor, dmap_cfg, vg_cfg;
+ unsigned long clk;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+
+ major = FIELD(version, MDP4_VERSION_MAJOR);
+ minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DBG("found MDP version v%d.%d", major, minor);
+
+ if (major != 4) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->dsi_pll_vdda) {
+ if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
+ 1200000, 1200000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vdda voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->dsi_pll_vddio) {
+ if (mdp4_kms->rev == 2) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
+ 1800000, 1800000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vddio voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->rev > 1) {
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+ /* max read pending cmd config, 3 pending requests: */
+ mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+ clk = clk_get_rate(mdp4_kms->clk);
+
+ if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+ dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ } else {
+ dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
+ vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
+ }
+
+ DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+ if (mdp4_kms->rev >= 2)
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+
+ /* disable CSC matrix / YUV by default: */
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+ if (mdp4_kms->rev > 1)
+ mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ /* if we had >1 encoder, we'd need something more clever: */
+ return mdp4_dtv_round_pixclk(encoder, rate);
+}
+
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ kfree(mdp4_kms);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = mdp4_hw_init,
+ .irq_preinstall = mdp4_irq_preinstall,
+ .irq_postinstall = mdp4_irq_postinstall,
+ .irq_uninstall = mdp4_irq_uninstall,
+ .irq = mdp4_irq,
+ .enable_vblank = mdp4_enable_vblank,
+ .disable_vblank = mdp4_disable_vblank,
+ .get_format = mdp4_get_format,
+ .round_pixclk = mdp4_round_pixclk,
+ .preclose = mdp4_preclose,
+ .destroy = mdp4_destroy,
+};
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret;
+
+ /*
+ * NOTE: this is a bit simplistic until we add support
+ * for more than just RGB1->DMA_E->DTV->HDMI
+ */
+
+ /* the CRTCs get constructed with a private plane: */
+ plane = mdp4_plane_init(dev, RGB1, true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for RGB1\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct DTV encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+ encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ ret = hdmi_init(dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static const char *iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct mdp4_kms *mdp4_kms;
+ struct msm_kms *kms = NULL;
+ int ret;
+
+ mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ kms = &mdp4_kms->base;
+ kms->funcs = &kms_funcs;
+
+ mdp4_kms->dev = dev;
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
+ if (IS_ERR(mdp4_kms->mmio)) {
+ ret = PTR_ERR(mdp4_kms->mmio);
+ goto fail;
+ }
+
+ mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+ if (IS_ERR(mdp4_kms->dsi_pll_vdda))
+ mdp4_kms->dsi_pll_vdda = NULL;
+
+ mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ if (IS_ERR(mdp4_kms->dsi_pll_vddio))
+ mdp4_kms->dsi_pll_vddio = NULL;
+
+ mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ if (mdp4_kms->vdd) {
+ ret = regulator_enable(mdp4_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk)) {
+ dev_err(dev->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdp4_kms->clk);
+ goto fail;
+ }
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ // XXX if (rev >= MDP_REV_42) { ???
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ dev_err(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
+
+ clk_set_rate(mdp4_kms->clk, config->max_clk);
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+
+ if (!config->iommu) {
+ dev_err(dev->dev, "no iommu\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdelay(16);
+
+ ret = msm_iommu_attach(dev, config->iommu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+
+ mdp4_kms->id = msm_register_iommu(dev, config->iommu);
+ if (mdp4_kms->id < 0) {
+ ret = mdp4_kms->id;
+ dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp4_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp4_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
+{
+ static struct mdp4_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ if (cpu_is_apq8064())
+ config.max_clk = 266667000;
+ else
+ config.max_clk = 200000000;
+
+ config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 00000000000..1e83554955f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "mdp4.xml.h"
+
+
+/* For transiently registering for different MDP4 irqs that various parts
+ * of the KMS code need during setup/configuration. Note that these are
+ * not necessarily the same as what drm_vblank_get/put() are requesting,
+ * and the hysteresis in drm_vblank_put() is not necessarily desirable
+ * for internal housekeeping-related irq usage.
+ */
+struct mdp4_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
+};
+
+struct mdp4_kms {
+ struct msm_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+
+ void __iomem *mmio;
+
+ struct regulator *dsi_pll_vdda;
+ struct regulator *dsi_pll_vddio;
+ struct regulator *vdd;
+
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *lut_clk;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ struct mdp4_irq error_handler;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp4_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+};
+
+struct mdp4_format {
+ struct msm_format base;
+ enum mpd4_bpc bpc_r, bpc_g, bpc_b;
+ enum mpd4_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+};
+#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+ return msm_readl(mdp4_kms->mmio + reg);
+}
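+
+/* Illustrative use of the accessors above (register names taken from
+ * mdp4_hw_init(), the sequence itself is only an example):
+ *
+ * uint32_t version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ * mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+ *
+ * All register I/O funnels through msm_readl()/msm_writel(), so it can
+ * be traced with the driver's 'reglog' module parameter.
+ */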
+
+static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1: return MDP4_OVERLAY_FLUSH_VG1;
+ case VG2: return MDP4_OVERLAY_FLUSH_VG2;
+ case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
+ case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+ switch (ovlp) {
+ case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
+ case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_DMA_P_DONE;
+ case DMA_S: return MDP4_IRQ_DMA_S_DONE;
+ case DMA_E: return MDP4_IRQ_DMA_E_DONE;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+ case DMA_S: return 0; // ???
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+ default: return 0;
+ }
+}
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms);
+int mdp4_enable(struct mdp4_kms *mdp4_kms);
+
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
+
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mpd4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id);
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static inline int match_dev_name(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), data);
+}
+/* bus scaling data is associated with extra pointless platform devices,
+ * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
+ * to find their pdata to make the bus-scaling stuff work.
+ */
+static inline void *mdp4_find_pdata(const char *devname)
+{
+ struct device *dev;
+ dev = bus_find_device(&platform_bus_type, NULL,
+ (void *)devname, match_dev_name);
+ return dev ? dev->platform_data : NULL;
+}
+#endif
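+
+/* Hypothetical use from an encoder (the device name is illustrative):
+ *
+ * void *pdata = mdp4_find_pdata("dtv.0");
+ * if (pdata)
+ * ... program bus bandwidth votes ...
+ */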
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 00000000000..3468229d58b
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+
+struct mdp4_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mpd4_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
+static int mdp4_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp4_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp4_plane_disable(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ DBG("%s: TODO", mdp4_plane->name); // XXX
+ return 0;
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+int mdp4_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+ .update_plane = mdp4_plane_update,
+ .disable_plane = mdp4_plane_disable,
+ .destroy = mdp4_plane_destroy,
+ .set_property = mdp4_plane_set_property,
+};
+
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mpd4_pipe pipe = mdp4_plane->pipe;
+ uint32_t iova;
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+ MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+ MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
+
+ plane->fb = fb;
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
+
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mpd4_pipe pipe = mdp4_plane->pipe;
+ const struct mdp4_format *format;
+ uint32_t op_mode = 0;
+ uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ if (src_w != crtc_w) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+ /* TODO calc phasex_step */
+ }
+
+ if (src_h != crtc_h) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+ /* TODO calc phasey_step */
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+ MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(src_x) |
+ MDP4_PIPE_SRC_XY_Y(src_y));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+ MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+ MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
+
+ mdp4_plane_set_scanout(plane, fb);
+
+ format = to_mdp4_format(msm_framebuffer_format(fb));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+ MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+ plane->crtc = crtc;
+
+ return 0;
+}
+
+static const char *pipe_names[] = {
+ "VG1", "VG2",
+ "RGB1", "RGB2", "RGB3",
+ "VG3", "VG4",
+};
+
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ return mdp4_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mpd4_pipe pipe_id, bool private_plane)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane = NULL;
+ struct mdp4_plane *mdp4_plane;
+ int ret;
+
+ mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+ if (!mdp4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp4_plane->base;
+
+ mdp4_plane->pipe = pipe_id;
+ mdp4_plane->name = pipe_names[pipe_id];
+
+ drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats, private_plane);
+
+ mdp4_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 00000000000..864c9773636
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+#include <mach/iommu.h>
+
+static void msm_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = msm_framebuffer_create,
+ .output_poll_changed = msm_fb_output_poll_changed,
+};
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return 0;
+}
+
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int idx = priv->num_iommus++;
+
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+ return -EINVAL;
+
+ priv->iommus[idx] = iommu;
+
+ iommu_set_fault_handler(iommu, msm_fault_handler, dev);
+
+ /* need to iommu_attach_device() somewhere?? on resume?? */
+
+ return idx;
+}
+
+int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
+ const char **names, int cnt)
+{
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (!ctx)
+ continue;
+ ret = iommu_attach_device(iommu, ctx);
+ if (ret) {
+ dev_warn(dev->dev, "could not attach iommu to %s\n", names[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
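+
+/* Note: a missing iommu context device is skipped rather than treated
+ * as an error, so a target that only wires up one of the mdp_port*_cb0
+ * contexts can still attach successfully.
+ */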
+
+#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
+static bool reglog = false;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
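+
+/* With CONFIG_DRM_MSM_REGISTER_LOGGING set, tracing can be enabled at
+ * load time (e.g. "modprobe msm reglog=1") or toggled afterwards via
+ * /sys/module/msm/parameters/reglog (mode 0600, so root only).
+ */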
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ struct resource *res;
+ unsigned long size;
+ void __iomem *ptr;
+
+ if (name)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ if (!ptr) {
+ dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (reglog)
+ printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
+
+ return ptr;
+}
+
+void msm_writel(u32 data, void __iomem *addr)
+{
+ if (reglog)
+ printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+ writel(data, addr);
+}
+
+u32 msm_readl(const void __iomem *addr)
+{
+ u32 val = readl(addr);
+ if (reglog)
+ printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
+ return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static int msm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_gpu *gpu = priv->gpu;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ if (kms) {
+ pm_runtime_disable(dev->dev);
+ kms->funcs->destroy(kms);
+ }
+
+ if (gpu) {
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->destroy(gpu);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+
+ drm_mode_config_init(dev);
+
+ kms = mdp4_kms_init(dev);
+ if (IS_ERR(kms)) {
+ /*
+ * NOTE: once we have GPU support, having no kms should not
+ * be considered fatal; ideally we would still support the gpu
+ * and (for example) use dmabuf/prime to share buffers with the
+ * imx drm driver on iMX5.
+ */
+ dev_err(dev->dev, "failed to load kms\n");
+ ret = PTR_ERR(kms);
+ goto fail;
+ }
+
+ priv->kms = kms;
+
+ if (kms) {
+ pm_runtime_enable(dev->dev);
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+#ifdef CONFIG_DRM_MSM_FBDEV
+ priv->fbdev = msm_fbdev_init(dev);
+#endif
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ msm_unload(dev);
+ return ret;
+}
+
+static void load_gpu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu;
+
+ if (priv->gpu)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ gpu = a3xx_gpu_init(dev);
+ if (IS_ERR(gpu)) {
+ dev_warn(dev->dev, "failed to load a3xx gpu\n");
+ gpu = NULL;
+ /* not fatal */
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ if (gpu) {
+ int ret;
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ gpu->funcs->destroy(gpu);
+ gpu = NULL;
+ }
+ }
+
+ priv->gpu = gpu;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx;
+
+ /* For now, load the gpu on open, to avoid requiring firmware
+ * in the initrd.
+ */
+ load_gpu(dev);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->driver_priv = ctx;
+
+ return 0;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms)
+ kms->funcs->preclose(kms, file);
+
+ mutex_lock(&dev->struct_mutex);
+ if (ctx == priv->lastctx)
+ priv->lastctx = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(ctx);
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev) {
+ drm_modeset_lock_all(dev);
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+ }
+}
+
+static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq_postinstall(kms);
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_uninstall(kms);
+}
+
+static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, m);
+ }
+
+ return 0;
+}
+
+static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "Active Objects (%s):\n", gpu->name);
+ msm_gem_describe_objects(&gpu->active_list, m);
+ }
+
+ seq_printf(m, "Inactive Objects:\n");
+ msm_gem_describe_objects(&priv->inactive_list, m);
+
+ return 0;
+}
+
+static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (priv->fbdev) {
+ seq_printf(m, "fbcon ");
+ fbdev_fb = priv->fbdev->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int show_locked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = show(dev, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+ {"gpu", show_locked, 0, msm_gpu_show},
+ {"gem", show_locked, 0, msm_gem_show},
+ { "mm", show_locked, 0, msm_mm_show },
+ { "fb", show_locked, 0, msm_fb_show },
+};
+
+static int msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list), minor);
+}
+#endif
+
+/*
+ * Fences:
+ */
+
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+ int ret;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ priv->completed_fence >= fence,
+ remaining_jiffies);
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, priv->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+
+ return ret;
+}
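+
+/* Note: 'timeout' above is treated as an absolute deadline (converted
+ * to jiffies and compared against the current jiffies), not a relative
+ * duration. The result is 0 once completed_fence has caught up to
+ * 'fence', -ETIMEDOUT on expiry, or -ERESTARTSYS on a signal.
+ */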
+
+/* call under struct_mutex */
+void msm_update_fence(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (fence > priv->completed_fence) {
+ priv->completed_fence = fence;
+ wake_up_all(&priv->fence_event);
+ }
+}
+
+/*
+ * DRM ioctls:
+ */
+
+static int msm_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_param *args = data;
+ struct msm_gpu *gpu;
+
+ /* for now we just have the 3d pipe; eventually this would need
+ * to be more clever and dispatch to the appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (!gpu)
+ return -ENXIO;
+
+ return gpu->funcs->get_param(gpu, args->param, &args->value);
+}
+
+static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_new *args = data;
+ return msm_gem_new_handle(dev, file, args->size,
+ args->flags, &args->handle);
+}
+
+#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
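+
+/* TS() bridges the ioctl's timespec-like args to a kernel struct
+ * timespec by copying tv_sec/tv_nsec field by field, so the uapi and
+ * kernel layouts need not match exactly, e.g.:
+ *
+ * msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+ */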
+
+static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_fini(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_wait_fence *args = data;
+ return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
+}
+
+static const struct drm_ioctl_desc msm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = msm_gem_mmap,
+};
+
+static struct drm_driver msm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = msm_load,
+ .unload = msm_unload,
+ .open = msm_open,
+ .preclose = msm_preclose,
+ .lastclose = msm_lastclose,
+ .irq_handler = msm_irq,
+ .irq_preinstall = msm_irq_preinstall,
+ .irq_postinstall = msm_irq_postinstall,
+ .irq_uninstall = msm_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = msm_enable_vblank,
+ .disable_vblank = msm_disable_vblank,
+ .gem_free_object = msm_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .dumb_destroy = msm_gem_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+ .debugfs_cleanup = msm_debugfs_cleanup,
+#endif
+ .ioctls = msm_ioctls,
+ .num_ioctls = DRM_MSM_NUM_IOCTLS,
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20130625",
+ .major = 1,
+ .minor = 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(ddev);
+
+ return 0;
+}
+
+static int msm_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&msm_driver, pdev);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&msm_driver, pdev);
+
+ return 0;
+}
+
+static const struct platform_device_id msm_id[] = {
+ { "mdp", 0 },
+ { }
+};
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_probe,
+ .remove = msm_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msm",
+ .pm = &msm_pm_ops,
+ },
+ .id_table = msm_id,
+};
+
+static int __init msm_drm_register(void)
+{
+ DBG("init");
+ hdmi_register();
+ a3xx_register();
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+ hdmi_unregister();
+ a3xx_unregister();
+}
+
+module_init(msm_drm_register);
+module_exit(msm_drm_unregister);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("MSM DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 00000000000..80d75094bf0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRV_H__
+#define __MSM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <asm/sizes.h>
+
+#ifndef CONFIG_OF
+#include <mach/board.h>
+#include <mach/socinfo.h>
+#include <mach/iommu_domains.h>
+#endif
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/msm_drm.h>
+
+struct msm_kms;
+struct msm_gpu;
+
+#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+
+struct msm_file_private {
+ /* currently we don't do anything useful with this, but when
+ * per-context address spaces are supported we'd keep track of
+ * the context's page-tables here.
+ */
+ int dummy;
+};
+
+struct msm_drm_private {
+
+ struct msm_kms *kms;
+
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+ struct msm_file_private *lastctx;
+
+ struct drm_fb_helper *fbdev;
+
+ uint32_t next_fence, completed_fence;
+ wait_queue_head_t fence_event;
+
+ /* list of GEM objects: */
+ struct list_head inactive_list;
+
+ struct workqueue_struct *wq;
+
+ /* registered IOMMU domains: */
+ unsigned int num_iommus;
+ struct iommu_domain *iommus[NUM_DOMAINS];
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_bridges;
+ struct drm_bridge *bridges[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+};
+
+struct msm_format {
+ uint32_t pixel_format;
+};
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* misc: */
+ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ /* cleanup: */
+ void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
+int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
+ const char **names, int cnt);
+
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout);
+void msm_update_fence(struct drm_device *dev, uint32_t fence);
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_vaddr(struct drm_gem_object *obj);
+int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
+ struct work_struct *work);
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, uint32_t fence);
+void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout);
+int msm_gem_cpu_fini(struct drm_gem_object *obj);
+void msm_gem_free_object(struct drm_gem_object *obj);
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+
+int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+void __init hdmi_register(void);
+void __exit hdmi_unregister(void);
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+#endif
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname);
+void msm_writel(u32 data, void __iomem *addr);
+u32 msm_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+static inline int align_pitch(int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* adreno needs pitch aligned to 32 pixels: */
+ return bytespp * ALIGN(width, 32);
+}
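+
+/* e.g. a 1366 pixel wide 32bpp scanout: bytespp = 4 and
+ * ALIGN(1366, 32) = 1376, so the returned pitch is 5504 bytes.
+ */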
+
+/* for the generated headers: */
+#define INVALID_IDX(idx) ({BUG(); 0;})
+#define fui(x) ({BUG(); 0;})
+#define util_float_to_half(x) ({BUG(); 0;})
+
+
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
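+
+/* e.g. FIELD(version, MDP4_VERSION_MAJOR) masks out the MAJOR bits and
+ * shifts them down, using the FOO__MASK/FOO__SHIFT pairs defined in the
+ * generated register headers.
+ */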
+
+/* for conditionally setting boolean flag(s): */
+#define COND(bool, val) ((bool) ? (val) : 0)
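+/* e.g. COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) */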
+
+
+#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 00000000000..0286c0eeb10
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct msm_framebuffer {
+ struct drm_framebuffer base;
+ const struct msm_format *format;
+ struct drm_gem_object *planes[2];
+};
+#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+
+
+static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return drm_gem_handle_create(file_priv,
+ msm_fb->planes[0], handle);
+}
+
+static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < n; i++) {
+ struct drm_gem_object *bo = msm_fb->planes[i];
+ if (bo)
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ kfree(msm_fb);
+}
+
+static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
+ .create_handle = msm_framebuffer_create_handle,
+ .destroy = msm_framebuffer_destroy,
+ .dirty = msm_framebuffer_dirty,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
+ fb->width, fb->height, (char *)&fb->pixel_format,
+ fb->refcount.refcount.counter, fb->base.id);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ msm_gem_describe(msm_fb->planes[i], m);
+ }
+}
+#endif
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->planes[plane];
+}
+
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->format;
+}
+
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *bos[4] = {0};
+ struct drm_framebuffer *fb;
+ int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(dev, file,
+ mode_cmd->handles[i]);
+ if (!bos[i]) {
+ ret = -ENXIO;
+ goto out_unref;
+ }
+ }
+
+ fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto out_unref;
+ }
+
+ return fb;
+
+out_unref:
+ for (i = 0; i < n; i++)
+ drm_gem_object_unreference_unlocked(bos[i]);
+ return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_framebuffer *msm_fb;
+ struct drm_framebuffer *fb = NULL;
+ const struct msm_format *format;
+ int ret, i, n;
+ unsigned int hsub, vsub;
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ n = drm_format_num_planes(mode_cmd->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ if (!format) {
+ dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
+ if (!msm_fb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &msm_fb->base;
+
+ msm_fb->format = format;
+
+ for (i = 0; i < n; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
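+ /* i.e. only 'width * cpp' bytes of the final scanline must fit
+ * in the bo; the last line need not be padded to the full pitch.
+ */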
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb->planes[i] = bos[i];
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ if (fb)
+ msm_framebuffer_destroy(fb);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 00000000000..6c6d7d4c9b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
+
+struct msm_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+};
+
+static struct fb_ops msm_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int msm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ dma_addr_t paddr;
+ int ret, size;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = align_pitch(
+ mode_cmd.width, sizes->surface_bpp);
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ mutex_lock(&dev->struct_mutex);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(fbdev->bo)) {
+ ret = PTR_ERR(fbdev->bo);
+ fbdev->bo = NULL;
+ dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
+ goto fail;
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference(fbdev->bo);
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* TODO implement our own fb_mmap so we don't need this: */
+ msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &msm_fb_ops;
+
+ strcpy(fbi->fix.id, "msm");
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_size = fbdev->bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = fbdev->bo->size;
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+ }
+ }
+
+ return ret;
+}
+
+static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+ .gamma_set = msm_crtc_fb_gamma_set,
+ .gamma_get = msm_crtc_fb_gamma_get,
+ .fb_probe = msm_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ goto fail;
+
+ helper = &fbdev->base;
+
+ helper->funcs = &msm_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void msm_fbdev_free(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct msm_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_msm_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 00000000000..6b5a6c8c765
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+
+/* called with dev->struct_mutex held */
+static struct page **get_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->pages) {
+ struct drm_device *dev = obj->dev;
+ struct page **p = drm_gem_get_pages(obj, 0);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (IS_ERR(p)) {
+ dev_err(dev->dev, "could not get pages: %ld\n",
+ PTR_ERR(p));
+ return p;
+ }
+
+		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+		if (IS_ERR(msm_obj->sgt)) {
+			dev_err(dev->dev, "failed to allocate sgt\n");
+			return ERR_CAST(msm_obj->sgt);
+		}
+
+ msm_obj->pages = p;
+
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+
+ return msm_obj->pages;
+}
+
+static void put_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->pages) {
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ msm_obj->pages = NULL;
+ }
+}
+
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (msm_obj->flags & MSM_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (msm_obj->flags & MSM_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ get_file(obj->filp);
+ vma->vm_pgoff = 0;
+ vma->vm_file = obj->filp;
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return 0;
+}
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ unsigned long pfn;
+ pgoff_t pgoff;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
+
+ /* make sure we have pages attached now */
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out_unlock;
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(msm_obj->pages[pgoff]);
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
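+		/* fall through */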
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+
+ return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
+/* helpers for dealing w/ iommu: */
+static int map_range(struct iommu_domain *domain, unsigned int iova,
+ struct sg_table *sgt, unsigned int len, int prot)
+{
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static void unmap_range(struct iommu_domain *domain, unsigned int iova,
+ struct sg_table *sgt, unsigned int len)
+{
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ break;
+
+ VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ da += bytes;
+ }
+}
+
+/* should be called under struct_mutex.. although it can be called
+ * from atomic context without struct_mutex to acquire an extra
+ * iova ref if you know one is already held.
+ *
+ * That means that when we eventually add support for unpinning,
+ * the refcnt counter needs to be atomic_t.
+ */
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (!msm_obj->domain[id].iova) {
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ struct page **pages;
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ // XXX ideally we would not map buffers writable when not needed...
+ ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
+ obj->size, IOMMU_READ | IOMMU_WRITE);
+ msm_obj->domain[id].iova = offset;
+ }
+
+ if (!ret)
+ *iova = msm_obj->domain[id].iova;
+
+ return ret;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+{
+ int ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_get_iova_locked(obj, id, iova);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+{
+ // XXX TODO ..
+ // NOTE: probably don't need a _locked() version.. we wouldn't
+ // normally unmap here, but instead just mark that it could be
+ // unmapped (if the iova refcnt drops to zero), but then later
+ // if another _get_iova_locked() fails we can start unmapping
+ // things that are no longer needed..
+}
+
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = align_pitch(args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+ return msm_gem_new_handle(dev, file, args->size,
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+}
+
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
+
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!msm_obj->vaddr) {
+ struct page **pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+ msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return msm_obj->vaddr;
+}
+
+void *msm_gem_vaddr(struct drm_gem_object *obj)
+{
+ void *ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
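+/* queue work that should run once the bo becomes inactive; if the bo
+ * is already inactive, the work is queued immediately:
+ */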
+int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
+ struct work_struct *work)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&work->entry)) {
+ ret = -EINVAL;
+ } else if (is_active(msm_obj)) {
+ list_add_tail(&work->entry, &msm_obj->inactive_work);
+ } else {
+ queue_work(priv->wq, work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_obj->gpu = gpu;
+ msm_obj->fence = fence;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+}
+
+void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ msm_obj->gpu = NULL;
+ msm_obj->fence = 0;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ while (!list_empty(&msm_obj->inactive_work)) {
+ struct work_struct *work;
+
+ work = list_first_entry(&msm_obj->inactive_work,
+ struct work_struct, entry);
+
+ list_del_init(&work->entry);
+ queue_work(priv->wq, work);
+ }
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
+ ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+
+ /* TODO cache maintenance */
+
+ return ret;
+}
+
+int msm_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ /* TODO cache maintenance */
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ uint64_t off = drm_vma_node_start(&obj->vma_node);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+ msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+ off, msm_obj->vaddr, obj->size);
+}
+
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct msm_gem_object *msm_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(msm_obj, list, mm_list) {
+ struct drm_gem_object *obj = &msm_obj->base;
+ seq_printf(m, " ");
+ msm_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+void msm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* object should not be on active list: */
+ WARN_ON(is_active(msm_obj));
+
+ list_del(&msm_obj->mm_list);
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ if (msm_obj->domain[id].iova) {
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ unmap_range(priv->iommus[id], offset,
+ msm_obj->sgt, obj->size);
+ }
+ }
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (msm_obj->vaddr)
+ vunmap(msm_obj->vaddr);
+
+ put_pages(obj);
+
+ if (msm_obj->resv == &msm_obj->_resv)
+ reservation_object_fini(msm_obj->resv);
+
+ drm_gem_object_release(obj);
+
+ kfree(msm_obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = msm_gem_new(dev, size, flags);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj = NULL;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ size = PAGE_ALIGN(size);
+
+ switch (flags & MSM_BO_CACHE_MASK) {
+ case MSM_BO_UNCACHED:
+ case MSM_BO_CACHED:
+ case MSM_BO_WC:
+ break;
+ default:
+ dev_err(dev->dev, "invalid cache flag: %x\n",
+ (flags & MSM_BO_CACHE_MASK));
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ if (!msm_obj) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ obj = &msm_obj->base;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+
+ msm_obj->flags = flags;
+
+ msm_obj->resv = &msm_obj->_resv;
+ reservation_object_init(msm_obj->resv);
+
+ INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->inactive_work);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 00000000000..d746f13d283
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GEM_H__
+#define __MSM_GEM_H__
+
+#include <linux/reservation.h>
+#include "msm_drv.h"
+
+struct msm_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+	/* An object is either:
+	 *  inactive - on priv->inactive_list
+	 *  active   - on one of the gpu's active_lists.. well, at
+	 *     least for now we don't have (I don't think) hw sync between
+	 *     2d and 3d on devices which have both, meaning we need to
+	 *     block on submit if a bo is already on the other ring
+	 */
+ struct list_head mm_list;
+ struct msm_gpu *gpu; /* non-null if active */
+ uint32_t fence;
+
+ /* Transiently in the process of submit ioctl, objects associated
+ * with the submit are on submit->bo_list.. this only lasts for
+ * the duration of the ioctl, so one bo can never be on multiple
+ * submit lists.
+ */
+ struct list_head submit_entry;
+
+	/* work deferred until bo is inactive: */
+ struct list_head inactive_work;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
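+	/* per-iommu-domain state for this bo (iova is valid while mapped): */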
+ struct {
+ // XXX
+ uint32_t iova;
+ } domain[NUM_DOMAINS];
+
+ /* normally (resv == &_resv) except for imported bo's */
+ struct reservation_object *resv;
+ struct reservation_object _resv;
+};
+#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+
+static inline bool is_active(struct msm_gem_object *msm_obj)
+{
+ return msm_obj->gpu != NULL;
+}
+
+#define MAX_CMDS 4
+
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
+ * associated with the cmdstream submission for synchronization (and
+ * make it easier to unwind when things go wrong, etc). This only
+ * lasts for the duration of the submit-ioctl.
+ */
+struct msm_gem_submit {
+ struct drm_device *dev;
+ struct msm_gpu *gpu;
+ struct list_head bo_list;
+ struct ww_acquire_ctx ticket;
+ uint32_t fence;
+ bool valid;
+ unsigned int nr_cmds;
+ unsigned int nr_bos;
+ struct {
+ uint32_t type;
+ uint32_t size; /* in dwords */
+ uint32_t iova;
+ } cmd[MAX_CMDS];
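+	/* flexible-array tail: one entry per bo referenced by the submit: */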
+ struct {
+ uint32_t flags;
+ struct msm_gem_object *obj;
+ uint32_t iova;
+ } bos[0];
+};
+
+#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 00000000000..3e1ef3a00f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
+#define BO_VALID 0x8000
+#define BO_LOCKED 0x4000
+#define BO_PINNED 0x2000
+
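+/* the ioctl structs carry userspace pointers in u64 fields; convert via
+ * uintptr_t so the cast is well-defined on 32-bit as well:
+ */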
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ struct msm_gpu *gpu, int nr)
+{
+ struct msm_gem_submit *submit;
+ int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (submit) {
+ submit->dev = dev;
+ submit->gpu = gpu;
+
+ /* initially, until copy_from_user() and bo lookup succeeds: */
+ submit->nr_bos = 0;
+ submit->nr_cmds = 0;
+
+ INIT_LIST_HEAD(&submit->bo_list);
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ }
+
+ return submit;
+}
+
+static int submit_lookup_objects(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ int ret = 0;
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_msm_gem_submit_bo submit_bo;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ void __user *userptr =
+ to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (submit_bo.flags & BO_INVALID_FLAGS) {
+ DBG("invalid flags: %x", submit_bo.flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->bos[i].flags = submit_bo.flags;
+ /* in validate_objects() we figure out if this is true: */
+ submit->bos[i].iova = submit_bo.presumed;
+
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, submit_bo.handle);
+ if (!obj) {
+ DBG("invalid handle %u at index %u", submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ msm_obj = to_msm_bo(obj);
+
+ if (!list_empty(&msm_obj->submit_entry)) {
+ DBG("handle %u at index %u already on submit list",
+ submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ drm_gem_object_reference(obj);
+
+ submit->bos[i].obj = msm_obj;
+
+ list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
+ }
+
+out_unlock:
+ submit->nr_bos = i;
+ spin_unlock(&file->table_lock);
+
+ return ret;
+}
+
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & BO_PINNED)
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+
+ if (submit->bos[i].flags & BO_LOCKED)
+ ww_mutex_unlock(&msm_obj->resv->lock);
+
+ if (!(submit->bos[i].flags & BO_VALID))
+ submit->bos[i].iova = 0;
+
+ submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+}
+
+/* This is where we make sure all the bo's are reserved and pin'd: */
+static int submit_validate_objects(struct msm_gem_submit *submit)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
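+	/* ww_mutex deadlock avoidance: if we hit -EDEADLK we drop all the
+	 * locks, slow-lock the contended bo, and retry the whole list:
+	 */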
+retry:
+ submit->valid = true;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ uint32_t iova;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+
+ /* if locking succeeded, pin bo: */
+ ret = msm_gem_get_iova(&msm_obj->base,
+ submit->gpu->id, &iova);
+
+ /* this would break the logic in the fail path.. there is no
+ * reason for this to happen, but just to be on the safe side
+ * let's notice if this starts happening in the future:
+ */
+ WARN_ON(ret == -EDEADLK);
+
+ if (ret)
+ goto fail;
+
+ submit->bos[i].flags |= BO_PINNED;
+
+ if (iova == submit->bos[i].iova) {
+ submit->bos[i].flags |= BO_VALID;
+ } else {
+ submit->bos[i].iova = iova;
+ submit->bos[i].flags &= ~BO_VALID;
+ submit->valid = false;
+ }
+ }
+
+ ww_acquire_done(&submit->ticket);
+
+ return 0;
+
+fail:
+ for (; i >= 0; i--)
+ submit_unlock_unpin_bo(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_unpin_bo(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct msm_gem_object *msm_obj = submit->bos[contended].obj;
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+ struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+{
+ if (idx >= submit->nr_bos) {
+ DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
+		return -EINVAL;
+ }
+
+ if (obj)
+ *obj = submit->bos[idx].obj;
+ if (iova)
+ *iova = submit->bos[idx].iova;
+ if (valid)
+ *valid = !!(submit->bos[idx].flags & BO_VALID);
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+ uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+{
+ uint32_t i, last_offset = 0;
+ uint32_t *ptr;
+ int ret;
+
+ if (offset % 4) {
+ DBG("non-aligned cmdstream buffer: %u", offset);
+ return -EINVAL;
+ }
+
+	/* For now, just map the entire thing.  Eventually we probably
+	 * want to do it page-by-page, w/ kmap() if not vmap()d..
+	 */
+ ptr = msm_gem_vaddr(&obj->base);
+
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ DBG("failed to map: %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_msm_gem_submit_reloc submit_reloc;
+ void __user *userptr =
+ to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ uint32_t iova, off;
+ bool valid;
+
+ ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
+ if (ret)
+ return -EFAULT;
+
+ if (submit_reloc.submit_offset % 4) {
+ DBG("non-aligned reloc offset: %u",
+ submit_reloc.submit_offset);
+ return -EINVAL;
+ }
+
+ /* offset in dwords: */
+ off = submit_reloc.submit_offset / 4;
+
+ if ((off >= (obj->base.size / 4)) ||
+ (off < last_offset)) {
+ DBG("invalid offset %u at reloc %u", off, i);
+ return -EINVAL;
+ }
+
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ if (ret)
+ return ret;
+
+ if (valid)
+ continue;
+
+ iova += submit_reloc.reloc_offset;
+
+ if (submit_reloc.shift < 0)
+ iova >>= -submit_reloc.shift;
+ else
+ iova <<= submit_reloc.shift;
+
+ ptr[off] = iova | submit_reloc.or;
+
+ last_offset = off;
+ }
+
+ return 0;
+}
+
+static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+{
+ unsigned i;
+
+ mutex_lock(&submit->dev->struct_mutex);
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ submit_unlock_unpin_bo(submit, i);
+ list_del_init(&msm_obj->submit_entry);
+ drm_gem_object_unreference(&msm_obj->base);
+ }
+ mutex_unlock(&submit->dev->struct_mutex);
+
+ ww_acquire_fini(&submit->ticket);
+ kfree(submit);
+}
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_gem_submit *args = data;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_submit *submit;
+ struct msm_gpu *gpu;
+ unsigned i;
+ int ret;
+
+ /* for now, we just have 3d pipe.. eventually this would need to
+ * be more clever to dispatch to appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+	gpu = priv->gpu;
+	if (!gpu)
+		return -ENXIO;
+
+ if (args->nr_cmds > MAX_CMDS)
+ return -EINVAL;
+
+ submit = submit_create(dev, gpu, args->nr_bos);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = submit_lookup_objects(submit, args, file);
+ if (ret)
+ goto out;
+
+ ret = submit_validate_objects(submit);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd submit_cmd;
+ void __user *userptr =
+ to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ struct msm_gem_object *msm_obj;
+ uint32_t iova;
+
+ ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = submit_bo(submit, submit_cmd.submit_idx,
+ &msm_obj, &iova, NULL);
+ if (ret)
+ goto out;
+
+ if (submit_cmd.size % 4) {
+ DBG("non-aligned cmdstream buffer size: %u",
+ submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (submit_cmd.size >= msm_obj->base.size) {
+ DBG("invalid cmdstream size: %u", submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].type = submit_cmd.type;
+ submit->cmd[i].size = submit_cmd.size / 4;
+ submit->cmd[i].iova = iova + submit_cmd.submit_offset;
+
+ if (submit->valid)
+ continue;
+
+ ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
+ submit_cmd.nr_relocs, submit_cmd.relocs);
+ if (ret)
+ goto out;
+ }
+
+ submit->nr_cmds = i;
+
+ ret = msm_gpu_submit(gpu, submit, ctx);
+
+ args->fence = submit->fence;
+
+out:
+ if (submit)
+ submit_cleanup(submit, !!ret);
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 00000000000..e1e1ec9321f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+/*
+ * Power Management:
+ */
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/kgsl.h>
+static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+	struct drm_device *dev = gpu->dev;
+	struct kgsl_device_platform_data *pdata;
+
+	if (!pdev) {
+		dev_err(dev->dev, "could not find gpu pdata\n");
+		return;
+	}
+
+	pdata = pdev->dev.platform_data;
+
+ if (pdata->bus_scale_table) {
+ gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+ DBG("bus scale client: %08x", gpu->bsc);
+ }
+}
+
+static void bs_fini(struct msm_gpu *gpu)
+{
+ if (gpu->bsc) {
+ msm_bus_scale_unregister_client(gpu->bsc);
+ gpu->bsc = 0;
+ }
+}
+
+static void bs_set(struct msm_gpu *gpu, int idx)
+{
+ if (gpu->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(gpu->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_fini(struct msm_gpu *gpu) {}
+static void bs_set(struct msm_gpu *gpu, int idx) {}
+#endif
+
+static int enable_pwrrail(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret = 0;
+
+ if (gpu->gpu_reg) {
+ ret = regulator_enable(gpu->gpu_reg);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (gpu->gpu_cx) {
+ ret = regulator_enable(gpu->gpu_cx);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int disable_pwrrail(struct msm_gpu *gpu)
+{
+ if (gpu->gpu_cx)
+ regulator_disable(gpu->gpu_cx);
+ if (gpu->gpu_reg)
+ regulator_disable(gpu->gpu_reg);
+ return 0;
+}
+
+static int enable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_prepare(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->fast_rate)
+ clk_set_rate(rate_clk, gpu->fast_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_enable(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int disable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_disable(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->slow_rate)
+ clk_set_rate(rate_clk, gpu->slow_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_unprepare(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int enable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_prepare_enable(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, gpu->bus_freq);
+ return 0;
+}
+
+static int disable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_disable_unprepare(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, 0);
+ return 0;
+}
+
+int msm_gpu_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+
+ ret = enable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_axi(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+
+ ret = disable_axi(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Hangcheck detection for locked gpu:
+ */
+
+static void recover_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
+ struct drm_device *dev = gpu->dev;
+
+ dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->recover(gpu);
+ mutex_unlock(&dev->struct_mutex);
+
+ msm_gpu_retire(gpu);
+}
+
+static void hangcheck_timer_reset(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ mod_timer(&gpu->hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+ struct msm_gpu *gpu = (struct msm_gpu *)data;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ if (fence != gpu->hangcheck_fence) {
+ /* some progress has been made.. ya! */
+ gpu->hangcheck_fence = fence;
+ } else if (fence < gpu->submitted_fence) {
+ /* no progress and not done.. hung! */
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ gpu->hangcheck_fence = fence;
+ queue_work(priv->wq, &gpu->recover_work);
+ }
+
+ /* if still more pending work, reset the hangcheck timer: */
+ if (gpu->submitted_fence > gpu->hangcheck_fence)
+ hangcheck_timer_reset(gpu);
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ mutex_lock(&dev->struct_mutex);
+
+ while (!list_empty(&gpu->active_list)) {
+ struct msm_gem_object *obj;
+
+ obj = list_first_entry(&gpu->active_list,
+ struct msm_gem_object, mm_list);
+
+ if (obj->fence <= fence) {
+ /* move to inactive: */
+ msm_gem_move_to_inactive(&obj->base);
+ msm_gem_put_iova(&obj->base, gpu->id);
+ drm_gem_object_unreference(&obj->base);
+ } else {
+ break;
+ }
+ }
+
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* call from irq handler to schedule work to retire bo's */
+void msm_gpu_retire(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ queue_work(priv->wq, &gpu->retire_work);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ int i, ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ submit->fence = ++priv->next_fence;
+
+ gpu->submitted_fence = submit->fence;
+
+ ret = gpu->funcs->submit(gpu, submit, ctx);
+ priv->lastctx = ctx;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ /* can't happen yet.. but when we add 2d support we'll have
+ * to deal w/ cross-ring synchronization:
+ */
+ WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
+
+ if (!is_active(msm_obj)) {
+ uint32_t iova;
+
+ /* ring takes a reference to the bo and iova: */
+ drm_gem_object_reference(&msm_obj->base);
+ msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
+ }
+
+ msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+ }
+ hangcheck_timer_reset(gpu);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/*
+ * Init/Cleanup:
+ */
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct msm_gpu *gpu = data;
+ return gpu->funcs->irq(gpu);
+}
+
+static const char *clk_names[] = {
+ "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
+};
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz)
+{
+ int i, ret;
+
+ gpu->dev = drm;
+ gpu->funcs = funcs;
+ gpu->name = name;
+
+ INIT_LIST_HEAD(&gpu->active_list);
+ INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->recover_work, recover_worker);
+
+ setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+ (unsigned long)gpu);
+
+ BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
+
+ /* Map registers: */
+ gpu->mmio = msm_ioremap(pdev, ioname, name);
+ if (IS_ERR(gpu->mmio)) {
+ ret = PTR_ERR(gpu->mmio);
+ goto fail;
+ }
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq_byname(pdev, irqname);
+ if (gpu->irq < 0) {
+ ret = gpu->irq;
+ dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
+ IRQF_TRIGGER_HIGH, gpu->name, gpu);
+ if (ret) {
+ dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ goto fail;
+ }
+
+ /* Acquire clocks: */
+ for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
+ gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+ DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
+ if (IS_ERR(gpu->grp_clks[i]))
+ gpu->grp_clks[i] = NULL;
+ }
+
+ gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ DBG("ebi1_clk: %p", gpu->ebi1_clk);
+ if (IS_ERR(gpu->ebi1_clk))
+ gpu->ebi1_clk = NULL;
+
+ /* Acquire regulators: */
+ gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
+ DBG("gpu_reg: %p", gpu->gpu_reg);
+ if (IS_ERR(gpu->gpu_reg))
+ gpu->gpu_reg = NULL;
+
+ gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
+ DBG("gpu_cx: %p", gpu->gpu_cx);
+ if (IS_ERR(gpu->gpu_cx))
+ gpu->gpu_cx = NULL;
+
+ /* Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ gpu->iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!gpu->iommu) {
+ dev_err(drm->dev, "failed to allocate IOMMU\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ gpu->id = msm_register_iommu(drm, gpu->iommu);
+
+ /* Create ringbuffer: */
+ gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ if (IS_ERR(gpu->rb)) {
+ ret = PTR_ERR(gpu->rb);
+ gpu->rb = NULL;
+ dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ bs_init(gpu, pdev);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_gpu_cleanup(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+
+ WARN_ON(!list_empty(&gpu->active_list));
+
+ bs_fini(gpu);
+
+ if (gpu->rb) {
+ if (gpu->rb_iova)
+ msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_ringbuffer_destroy(gpu->rb);
+ }
+
+ if (gpu->iommu)
+ iommu_domain_free(gpu->iommu);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 00000000000..8cd829e520b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GPU_H__
+#define __MSM_GPU_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_ringbuffer.h"
+
+struct msm_gem_submit;
+
+/* So far, with hardware that I've seen to date, we can have:
+ * + zero, one, or two z180 2d cores
+ * + a3xx or a2xx 3d core, which share a common CP (the firmware
+ * for the CP seems to implement some different PM4 packet types
+ * but the basics of cmdstream submission are the same)
+ *
+ * Which means that the eventual complete "class" hierarchy, once
+ * support for all past and present hw is in place, becomes:
+ * + msm_gpu
+ * + adreno_gpu
+ * + a3xx_gpu
+ * + a2xx_gpu
+ * + z180_gpu
+ */
+struct msm_gpu_funcs {
+ int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+ int (*hw_init)(struct msm_gpu *gpu);
+ int (*pm_suspend)(struct msm_gpu *gpu);
+ int (*pm_resume)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+ void (*flush)(struct msm_gpu *gpu);
+ void (*idle)(struct msm_gpu *gpu);
+	irqreturn_t (*irq)(struct msm_gpu *gpu);
+ uint32_t (*last_fence)(struct msm_gpu *gpu);
+ void (*recover)(struct msm_gpu *gpu);
+ void (*destroy)(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+ /* show GPU status in debugfs: */
+ void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+};
+
+struct msm_gpu {
+ const char *name;
+ struct drm_device *dev;
+ const struct msm_gpu_funcs *funcs;
+
+ struct msm_ringbuffer *rb;
+ uint32_t rb_iova;
+
+ /* list of GEM active objects: */
+ struct list_head active_list;
+
+ uint32_t submitted_fence;
+
+ /* worker for handling active-list retiring: */
+ struct work_struct retire_work;
+
+ void __iomem *mmio;
+ int irq;
+
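+	/* iommu domain for gpu buffers, and its id (index into priv->iommus[]): */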
+ struct iommu_domain *iommu;
+ int id;
+
+ /* Power Control: */
+ struct regulator *gpu_reg, *gpu_cx;
+ struct clk *ebi1_clk, *grp_clks[5];
+ uint32_t fast_rate, slow_rate, bus_freq;
+ uint32_t bsc;
+
+	/* Hang Detection: */
+#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ uint32_t hangcheck_fence;
+ struct work_struct recover_work;
+};
+
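+/* register accessors: 'reg' is a dword index, hence the '<< 2' conversion
+ * to a byte offset:
+ */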
+static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
+{
+ msm_writel(data, gpu->mmio + (reg << 2));
+}
+
+static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
+{
+ return msm_readl(gpu->mmio + (reg << 2));
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+int msm_gpu_pm_resume(struct msm_gpu *gpu);
+
+void msm_gpu_retire(struct msm_gpu *gpu);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz);
+void msm_gpu_cleanup(struct msm_gpu *gpu);
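+
+/* A rough usage sketch (hedged: the resource/irq names here are only
+ * illustrative, the real ones come from the platform device resources):
+ *
+ *   ret = msm_gpu_init(drm, pdev, gpu, &funcs, "a3xx",
+ *                      "gpu_reg_memory", "gpu_irq", SZ_32K);
+ */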
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+void __init a3xx_register(void);
+void __exit a3xx_unregister(void);
+
+#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 00000000000..8171537dd7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_ringbuffer.h"
+#include "msm_gpu.h"
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+{
+ struct msm_ringbuffer *ring;
+ int ret;
+
+ size = ALIGN(size, 4); /* size should be dword aligned */
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ring->gpu = gpu;
+ ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ if (IS_ERR(ring->bo)) {
+ ret = PTR_ERR(ring->bo);
+ ring->bo = NULL;
+ goto fail;
+ }
+
+ ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->end = ring->start + (size / 4);
+ ring->cur = ring->start;
+
+ ring->size = size;
+
+ return ring;
+
+fail:
+ if (ring)
+ msm_ringbuffer_destroy(ring);
+ return ERR_PTR(ret);
+}
+
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
+{
+ if (ring->bo)
+ drm_gem_object_unreference(ring->bo);
+ kfree(ring);
+}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 00000000000..6e0e1049fa4
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_RINGBUFFER_H__
+#define __MSM_RINGBUFFER_H__
+
+#include "msm_drv.h"
+
+struct msm_ringbuffer {
+ struct msm_gpu *gpu;
+ int size;
+ struct drm_gem_object *bo;
+ uint32_t *start, *end, *cur;
+};
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
+
+/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
+
+static inline void
+OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
+{
+ if (ring->cur == ring->end)
+ ring->cur = ring->start;
+ *(ring->cur++) = data;
+}
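+
+/* e.g. (a sketch; actual packet encodings are gpu-generation specific):
+ *
+ *   OUT_RING(ring, header);     // packet header
+ *   OUT_RING(ring, payload);    // packet payload
+ *   gpu->funcs->flush(gpu);     // then kick the gpu
+ */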
+
+#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 6161eaf5447..52fb2aa129e 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,6 +27,8 @@
#include <core/subdev.h>
#include <core/printk.h>
+int nv_printk_suspend_level = NV_DBG_DEBUG;
+
void
nv_printk_(struct nouveau_object *object, const char *pfx, int level,
const char *fmt, ...)
@@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
vprintk(mfmt, args);
va_end(args);
}
+
+#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
+
+const char *nv_printk_level_to_pfx(int level)
+{
+ switch (level) {
+ CONV_LEVEL(FATAL);
+ CONV_LEVEL(ERROR);
+ CONV_LEVEL(WARN);
+ CONV_LEVEL(INFO);
+ CONV_LEVEL(DEBUG);
+ CONV_LEVEL(PARANOIA);
+ CONV_LEVEL(TRACE);
+ CONV_LEVEL(SPAM);
+ }
+ return NV_PRINTK_DEBUG;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 86a64045dd6..f3b9bddc387 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -22,7 +22,6 @@
#include <core/object.h>
#include <core/ramht.h>
-#include <core/math.h>
#include <subdev/bar.h>
@@ -104,6 +103,6 @@ nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
if (ret)
return ret;
- ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+ ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
index 8bf92b0e6d8..6b089e022fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/bsp.h>
struct nv98_bsp_priv {
- struct nouveau_engine base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -37,31 +35,49 @@ struct nv98_bsp_priv {
static struct nouveau_oclass
nv98_bsp_sclass[] = {
+ { 0x88b1, &nouveau_object_ofuncs },
+ { 0x85b1, &nouveau_object_ofuncs },
+ { 0x86b1, &nouveau_object_ofuncs },
{},
};
/*******************************************************************************
- * BSP context
+ * PBSP context
******************************************************************************/
static struct nouveau_oclass
nv98_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
/*******************************************************************************
- * BSP engine/subdev functions
+ * PBSP engine/subdev functions
******************************************************************************/
static int
+nv98_bsp_init(struct nouveau_object *object)
+{
+ struct nv98_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000ffd2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -69,7 +85,7 @@ nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_bsp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
"PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -86,8 +102,10 @@ nv98_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_bsp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index f02fd9f443f..a66b27c0fca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -49,18 +49,23 @@ int
nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
{
const u32 doff = (or * 0x800);
- int load = -EINVAL;
+
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
mdelay(9);
udelay(500);
- nv_wr32(priv, 0x61a00c + doff, 0x80000000);
- load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
- nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- return load;
+
+ nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+ if (!(loadval & 0x80000000))
+ return -ETIMEDOUT;
+
+ return (loadval & 0x38000000) >> 27;
}
int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f..054d9cff4f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
if (ret)
return ret;
- DBG("status %*ph\n", 6, dp->stat);
+ DBG("status %6ph\n", dp->stat);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 7ffe2f309f1..c168ae3eaa9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -628,7 +628,7 @@ nv50_disp_base_init(struct nouveau_object *object)
}
/* ... PIOR caps */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < priv->pior.nr; i++) {
tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
}
@@ -834,10 +834,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
u8 ver, hdr, cnt, len;
u16 data;
u32 ctrl = 0x00000000;
+ u32 reg;
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
/* SOR */
@@ -845,19 +846,18 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
- i += 4;
+ reg = 0x610b74;
} else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610798 + (i * 8));
- i += 4;
+ reg = 0x610798;
}
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
i += 8;
}
@@ -893,10 +893,11 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
u8 ver, hdr, cnt, len;
u32 ctrl = 0x00000000;
u32 data, conf = ~0;
+ u32 reg;
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
/* SOR */
@@ -904,19 +905,18 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
- i += 4;
+ reg = 0x610b70;
} else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610794 + (i * 8));
- i += 4;
+ reg = 0x610794;
}
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
i += 8;
}
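
These hunks replace the hard-coded output counts (3 DACs, 2 or 4 SORs, 3 PIORs) with the per-board counts already probed into priv, and hoist the chipset check out of the SOR loop so only the register base differs between generations. A simplified sketch of the deduplicated scan, with field names assumed and the register read stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    struct disp { int sor_nr; int chipset; };   /* simplified stand-in */

    static uint32_t rd32(uint32_t addr) { (void)addr; return 0; }

    static uint32_t scan_sors(const struct disp *d, int head)
    {
        /* pick the base once, then run a single bounded loop */
        uint32_t reg = (d->chipset < 0x90 || d->chipset == 0x92 ||
                        d->chipset == 0xa0) ? 0x610b74 : 0x610798;
        uint32_t ctrl = 0;
        for (int i = 0; !(ctrl & (1u << head)) && i < d->sor_nr; i++)
            ctrl = rd32(reg + i * 8);
        return ctrl;
    }

    int main(void)
    {
        struct disp d = { .sor_nr = 4, .chipset = 0xa3 };
        printf("ctrl=0x%08x\n", scan_sors(&d, 0));
        return 0;
    }
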
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e9b8217d007..7e5dff51d3c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -26,7 +26,6 @@
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/class.h>
-#include <core/math.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -278,7 +277,7 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
return ret;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 7f53196cff5..91a87cd7195 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -28,7 +28,6 @@
#include <core/ramht.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -57,6 +56,7 @@ nv84_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0020; break;
case NVDEV_ENGINE_VP : addr = 0x0040; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
case NVDEV_ENGINE_BSP : addr = 0x0080; break;
case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
@@ -92,6 +92,7 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break;
case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
@@ -258,7 +259,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 46dfa68c47b..ce92f289e75 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -29,7 +29,6 @@
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -200,7 +199,7 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
usermem = chan->base.chid * 0x1000;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 09644fa9602..8e8121abe31 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -29,7 +29,6 @@
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -240,7 +239,7 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
usermem = chan->base.chid * 0x200;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
for (i = 0; i < 0x200; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index 7da35a4e797..ad820937752 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -1,6 +1,9 @@
#ifndef __NV40_GRAPH_H__
#define __NV40_GRAPH_H__
+#include <core/device.h>
+#include <core/gpuobj.h>
+
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 5a5b2a773ed..13bf31c40aa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -19,21 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engine.h>
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_engine base;
-};
-
-struct nv98_ppp_chan {
- struct nouveau_engctx base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -42,6 +35,8 @@ struct nv98_ppp_chan {
static struct nouveau_oclass
nv98_ppp_sclass[] = {
+ { 0x88b3, &nouveau_object_ofuncs },
+ { 0x85b3, &nouveau_object_ofuncs },
{},
};
@@ -53,12 +48,12 @@ static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -67,6 +62,21 @@ nv98_ppp_cclass = {
******************************************************************************/
static int
+nv98_ppp_init(struct nouveau_object *object)
+{
+ struct nv98_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000ffd2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -74,7 +84,7 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
"PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -91,8 +101,10 @@ nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
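
The PPP engine (and VP below) is rebased from the bare nouveau_engine onto the shared falcon class, so the generic falcon ctor/dtor/rd32/wr32 helpers apply and only the unit-specific init remains. A toy sketch of the embed-the-base-class pattern, with types invented and radically simplified for illustration:

    #include <stdio.h>

    struct nouveau_falcon { unsigned addr; const char *name; };

    struct nv98_ppp_priv {
        struct nouveau_falcon base;   /* was: struct nouveau_engine base */
    };

    /* A generic helper can operate on any engine that embeds the base
     * as its first member: the upcast is just a pointer to the same
     * address. */
    static void falcon_init(struct nouveau_falcon *f)
    {
        printf("init falcon '%s' @ 0x%06x\n", f->name, f->addr);
    }

    int main(void)
    {
        struct nv98_ppp_priv priv = { .base = { 0x086000, "ppp" } };
        falcon_init(&priv.base);
        return 0;
    }
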
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
index 8a8236bc84d..fc9ae0ff1ef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/vp.h>
struct nv98_vp_priv {
- struct nouveau_engine base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -37,6 +35,8 @@ struct nv98_vp_priv {
static struct nouveau_oclass
nv98_vp_sclass[] = {
+ { 0x88b2, &nouveau_object_ofuncs },
+ { 0x85b2, &nouveau_object_ofuncs },
{},
};
@@ -48,12 +48,12 @@ static struct nouveau_oclass
nv98_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -62,6 +62,21 @@ nv98_vp_cclass = {
******************************************************************************/
static int
+nv98_vp_init(struct nouveau_object *object)
+{
+ struct nv98_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000ffd2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -69,7 +84,7 @@ nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_vp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
"PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -86,8 +101,10 @@ nv98_vp_oclass = {
.handle = NV_ENGINE(VP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_vp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
deleted file mode 100644
index f808131c5cd..00000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/math.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __NOUVEAU_MATH_H__
-#define __NOUVEAU_MATH_H__
-
-static inline int
-log2i(u64 base)
-{
- u64 temp = base >> 1;
- int log2;
-
- for (log2 = 0; temp; log2++, temp >>= 1) {
- }
-
- return (base & (base - 1)) ? log2 + 1: log2;
-}
-
-#endif
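
The private log2i() above is dropped in favour of the kernel's order_base_2() from <linux/log2.h>, which likewise returns the rounded-up base-2 logarithm, so the ilength computations in the fifo constructors keep their values. A userspace model of the shared semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* Smallest k with (1 << k) >= n: the same rounded-up log2 the
     * removed log2i() computed (n >= 1 assumed, as in the callers). */
    static int order_base_2_model(uint64_t n)
    {
        int k = 0;
        while ((1ull << k) < n)
            k++;
        return k;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               order_base_2_model(1),    /* 0 */
               order_base_2_model(8),    /* 3 */
               order_base_2_model(9));   /* 4 */
        return 0;
    }
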
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index febed2ea5c8..d87836e3a70 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,6 +15,12 @@ struct nouveau_object;
#define NV_PRINTK_TRACE KERN_DEBUG
#define NV_PRINTK_SPAM KERN_DEBUG
+extern int nv_printk_suspend_level;
+
+#define NV_DBG_SUSPEND (nv_printk_suspend_level)
+#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
+
+const char *nv_printk_level_to_pfx(int level);
void __printf(4, 5)
nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
@@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
+
+static inline void nv_suspend_set_printk_level(int level)
+{
+ nv_printk_suspend_level = level;
+}
+
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
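
nv_suspend() logs through a level that can be raised at runtime: the system suspend/resume paths bump it to INFO so progress messages stay visible, then drop it back to DEBUG so the same messages are quiet during frequent runtime suspends. A toy model of that switch, with names simplified:

    #include <stdio.h>

    enum { DBG_DEBUG = 0, DBG_INFO = 1 };

    static int suspend_level = DBG_DEBUG;   /* quiet by default */

    static void nv_suspend_msg(const char *msg)
    {
        printf("[%s] %s\n",
               suspend_level == DBG_INFO ? "info" : "debug", msg);
    }

    int main(void)
    {
        suspend_level = DBG_INFO;           /* entering system suspend */
        nv_suspend_msg("suspending display...");
        suspend_level = DBG_DEBUG;          /* back to the quiet default */
        nv_suspend_msg("runtime suspend, stay quiet");
        return 0;
    }
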
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 888384c0bed..7e4e2775f24 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -39,8 +39,8 @@ struct nouveau_i2c_func {
int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
-#define nouveau_i2c_port_create(p,e,o,i,a,d) \
- nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
+#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
sizeof(**d), (void **)d)
#define nouveau_i2c_port_destroy(p) ({ \
struct nouveau_i2c_port *port = (p); \
@@ -53,7 +53,9 @@ struct nouveau_i2c_func {
int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u8,
- const struct i2c_algorithm *, int, void **);
+ const struct i2c_algorithm *,
+ const struct nouveau_i2c_func *,
+ int, void **);
void _nouveau_i2c_port_dtor(struct nouveau_object *);
#define _nouveau_i2c_port_init nouveau_object_init
#define _nouveau_i2c_port_fini nouveau_object_fini
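
The port's function table moves from a post-create assignment in each ctor to an argument of nouveau_i2c_port_create(), so port->func is valid from the moment the object exists rather than only after the ctor's tail code runs. A small sketch of the pattern, with invented simplified types:

    #include <stdio.h>
    #include <stdlib.h>

    struct port_func { int (*xfer)(void); };
    struct port { const struct port_func *func; };

    static int port_create(const struct port_func *func, struct port **pport)
    {
        struct port *p = calloc(1, sizeof(*p));
        if (!p)
            return -1;
        p->func = func;      /* set before the object is published */
        *pport = p;
        return 0;
    }

    static int dummy_xfer(void) { return 0; }
    static const struct port_func dummy_func = { .xfer = dummy_xfer };

    int main(void)
    {
        struct port *p;
        if (port_create(&dummy_func, &p) == 0) {
            printf("xfer -> %d\n", p->func->xfer());
            free(p);
        }
        return 0;
    }
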
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 9d2cd200625..ce6569f365a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -12,6 +12,7 @@ struct nouveau_mc_intr {
struct nouveau_mc {
struct nouveau_subdev base;
const struct nouveau_mc_intr *intr_map;
+ bool use_msi;
};
static inline struct nouveau_mc *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index e465d158d35..9ab70dfe5b0 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -22,6 +22,7 @@ bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
+void nouveau_timer_alarm_cancel(void *, struct nouveau_alarm *);
#define NV_WAIT_DEFAULT 2000000000ULL
#define nv_wait(o,a,m,v) \
@@ -35,6 +36,7 @@ struct nouveau_timer {
struct nouveau_subdev base;
u64 (*read)(struct nouveau_timer *);
void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
+ void (*alarm_cancel)(struct nouveau_timer *, struct nouveau_alarm *);
};
static inline struct nouveau_timer *
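
alarm_cancel() is the new hook backing nouveau_timer_alarm_cancel(); the nv04 implementation later in this series deletes the alarm from the pending list under the lock and re-initialises its list head so list_empty() holds again. A freestanding model of that unlink, with a hand-rolled doubly linked list standing in for <linux/list.h>:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static int  list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add(struct list_head *e, struct list_head *h)
    {
        e->next = h->next; e->prev = h;
        h->next->prev = e; h->next = e;
    }

    /* The cancel operation: unlink, then re-init so that a later
     * list_empty() check sees the alarm as idle and safe to re-arm. */
    static void alarm_cancel(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        list_init(e);
    }

    int main(void)
    {
        struct list_head pending, alarm;
        list_init(&pending);
        list_init(&alarm);
        list_add(&alarm, &pending);             /* schedule */
        alarm_cancel(&alarm);                   /* cancel */
        printf("idle=%d\n", list_empty(&alarm)); /* 1 */
        return 0;
    }
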
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index 3bd9be2ab37..191e739f30d 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -13,11 +13,13 @@
#include <linux/i2c-algo-bit.h>
#include <linux/delay.h>
#include <linux/io-mapping.h>
-#include <linux/vmalloc.h>
#include <linux/acpi.h>
+#include <linux/vmalloc.h>
#include <linux/dmi.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 0687e648143..2e11ea02cf8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
u16 data;
if (execute)
- nv_info(bios, "running init tables\n");
+ nv_suspend(bios, "running init tables\n");
while (!ret && (data = (init_script(bios, ++i)))) {
struct nvbios_init init = {
.subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 22a20573ed1..22ac6dbd6c8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -184,7 +184,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
cur_trip->fan_duty = value;
break;
case 0x26:
- fan->pwm_freq = value;
+ if (!fan->pwm_freq)
+ fan->pwm_freq = value;
break;
case 0x3b:
fan->bump_period = value;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
index dec94e9d776..4b195ac4da6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -118,7 +118,8 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &chan);
+ &nouveau_i2c_aux_algo, &anx9805_aux_func,
+ &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -140,8 +141,6 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
-
- chan->base.func = &anx9805_aux_func;
return 0;
}
@@ -234,7 +233,8 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &anx9805_i2c_algo, &port);
+ &anx9805_i2c_algo, &anx9805_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -256,8 +256,6 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
-
- port->base.func = &anx9805_i2c_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 8ae2625415e..2895c19bb15 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -95,6 +95,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, u8 index,
const struct i2c_algorithm *algo,
+ const struct nouveau_i2c_func *func,
int size, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
@@ -112,6 +113,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
port->adapter.owner = THIS_MODULE;
port->adapter.dev.parent = &device->pdev->dev;
port->index = index;
+ port->func = func;
i2c_set_adapdata(&port->adapter, i2c);
if ( algo == &nouveau_i2c_bit_algo &&
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
index 2ad18840fe6..860d5d2365d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -91,12 +91,12 @@ nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv04_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv04_i2c_func;
port->drive = info->drive;
port->sense = info->sense;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
index f501ae25dbb..0c2655a03bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -84,12 +84,12 @@ nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv4e_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv4e_i2c_func;
port->addr = 0x600800 + info->drive;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
index 378dfa324e5..a8d67a28770 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -85,7 +85,8 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv50_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -93,7 +94,6 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (info->drive >= nv50_i2c_addr_nr)
return -EINVAL;
- port->base.func = &nv50_i2c_func;
port->state = 0x00000007;
port->addr = nv50_i2c_addr[info->drive];
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index 61b771670bf..df6d3e4b68b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -186,7 +186,8 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv94_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -194,7 +195,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (info->drive >= nv50_i2c_addr_nr)
return -EINVAL;
- port->base.func = &nv94_i2c_func;
port->state = 7;
port->addr = nv50_i2c_addr[info->drive];
if (info->share != DCB_I2C_UNUSED) {
@@ -221,12 +221,12 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &port);
+ &nouveau_i2c_aux_algo, &nv94_aux_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv94_aux_func;
port->addr = info->drive;
if (info->share != DCB_I2C_UNUSED) {
port->ctrl = 0x00e500 + (info->drive * 0x50);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index f761b8a610f..29967d30f97 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -60,12 +60,12 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nvd0_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nvd0_i2c_func;
port->state = 0x00000007;
port->addr = 0x00d014 + (info->drive * 0x20);
if (info->share != DCB_I2C_UNUSED) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 716bf41bc3c..b10a143787a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -22,15 +22,9 @@
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include <engine/graph/nv40.h>
-static inline int
-nv44_graph_class(struct nv04_instmem_priv *priv)
-{
- if ((nv_device(priv)->chipset & 0xf0) == 0x60)
- return 1;
- return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
-}
+#include "nv04.h"
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index ec9cd6f10f9..37712a6df92 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -23,16 +23,20 @@
*/
#include <subdev/mc.h>
+#include <core/option.h>
static irqreturn_t
nouveau_mc_intr(int irq, void *arg)
{
struct nouveau_mc *pmc = arg;
const struct nouveau_mc_intr *map = pmc->intr_map;
+ struct nouveau_device *device = nv_device(pmc);
struct nouveau_subdev *unit;
u32 stat, intr;
intr = stat = nv_rd32(pmc, 0x000100);
+ if (intr == 0xffffffff)
+ return IRQ_NONE;
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(pmc, map->unit);
@@ -43,10 +47,15 @@ nouveau_mc_intr(int irq, void *arg)
map++;
}
+ if (pmc->use_msi)
+ nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
+
if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
+ if (stat == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(&device->pdev->dev);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
@@ -75,6 +84,8 @@ _nouveau_mc_dtor(struct nouveau_object *object)
struct nouveau_device *device = nv_device(object);
struct nouveau_mc *pmc = (void *)object;
free_irq(device->pdev->irq, pmc);
+ if (pmc->use_msi)
+ pci_disable_msi(device->pdev);
nouveau_subdev_destroy(&pmc->base);
}
@@ -96,6 +107,23 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
pmc->intr_map = intr_map;
+ switch (device->pdev->device & 0x0ff0) {
+ case 0x00f0: /* BR02? */
+ case 0x02e0: /* BR02? */
+ pmc->use_msi = false;
+ break;
+ default:
+ pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
+ if (pmc->use_msi) {
+ pmc->use_msi = pci_enable_msi(device->pdev) == 0;
+ if (pmc->use_msi) {
+ nv_info(pmc, "MSI interrupts enabled\n");
+ nv_wr08(device, 0x00088068, 0xff);
+ }
+ }
+ break;
+ }
+
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
IRQF_SHARED, "nouveau", pmc);
if (ret < 0)
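
MSI support here is opt-out via the NvMSI config option, force-disabled for two BR02-bridged device-ID ranges, and only recorded as active when pci_enable_msi() actually succeeded; the interrupt handler also bails with IRQ_NONE when the status register reads back 0xffffffff, which happens when the device is powered down or gone. A sketch of the decision flow, with the PCI call stubbed:

    #include <stdbool.h>
    #include <stdio.h>

    static bool pci_enable_msi_ok(void) { return true; }   /* stand-in */

    static bool choose_msi(unsigned pci_device_id, bool cfg_nvmsi)
    {
        switch (pci_device_id & 0x0ff0) {
        case 0x00f0:            /* BR02-bridged boards: MSI unreliable */
        case 0x02e0:
            return false;
        default:
            return cfg_nvmsi && pci_enable_msi_ok();
        }
    }

    int main(void)
    {
        printf("0x00f1 -> %d\n", choose_msi(0x00f1, true));  /* 0 */
        printf("0x0640 -> %d\n", choose_msi(0x0640, true));  /* 1 */
        return 0;
    }
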
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 0d57b4d3e00..06710419a59 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,6 +35,7 @@ nv98_mc_intr[] = {
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -42,7 +43,7 @@ nv98_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0040d101, NVDEV_SUBDEV_FB },
+ { 0x0042d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index a00a5a76e2d..f1de7a9c572 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -95,12 +95,14 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
int duty;
spin_lock_irqsave(&priv->lock, flags);
+ nv_debug(therm, "FAN speed check\n");
if (mode < 0)
mode = priv->mode;
priv->mode = mode;
switch (mode) {
case NOUVEAU_THERM_CTRL_MANUAL:
+ ptimer->alarm_cancel(ptimer, &priv->alarm);
duty = nouveau_therm_fan_get(therm);
if (duty < 0)
duty = 100;
@@ -113,6 +115,7 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
break;
case NOUVEAU_THERM_CTRL_NONE:
default:
+ ptimer->alarm_cancel(ptimer, &priv->alarm);
goto done;
}
@@ -122,6 +125,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
done:
if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+ else if (!list_empty(&priv->alarm.head))
+ nv_debug(therm, "therm fan alarm list is not empty\n");
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -267,9 +272,15 @@ _nouveau_therm_init(struct nouveau_object *object)
if (ret)
return ret;
- if (priv->suspend >= 0)
- nouveau_therm_fan_mode(therm, priv->mode);
- priv->sensor.program_alarms(therm);
+ if (priv->suspend >= 0) {
+	/* restore the pwm value only when in manual or auto mode */
+ if (priv->suspend > 0)
+ nouveau_therm_fan_set(therm, true, priv->fan->percent);
+
+ nouveau_therm_fan_mode(therm, priv->suspend);
+ }
+ nouveau_therm_sensor_init(therm);
+ nouveau_therm_fan_init(therm);
return 0;
}
@@ -279,6 +290,8 @@ _nouveau_therm_fini(struct nouveau_object *object, bool suspend)
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
+ nouveau_therm_fan_fini(therm, suspend);
+ nouveau_therm_sensor_fini(therm, suspend);
if (suspend) {
priv->suspend = priv->mode;
priv->mode = NOUVEAU_THERM_CTRL_NONE;
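
Suspend now cancels the fan and sensor alarms before saving state, and resume re-applies the saved fan duty only when the pre-suspend mode was manual or auto. A toy model of the save/restore, with the fields pared down for illustration:

    #include <stdio.h>

    enum mode { CTRL_NONE = 0, CTRL_MANUAL = 1, CTRL_AUTO = 2 };

    struct therm { int suspend; enum mode mode; int fan_percent; };

    static void therm_fini(struct therm *t)
    {
        t->suspend = t->mode;      /* remember the mode across suspend */
        t->mode = CTRL_NONE;
    }

    static void therm_init(struct therm *t)
    {
        if (t->suspend >= 0) {
            if (t->suspend > CTRL_NONE)   /* manual or auto only */
                printf("restore fan duty %d%%\n", t->fan_percent);
            t->mode = (enum mode)t->suspend;
        }
    }

    int main(void)
    {
        struct therm t = { .suspend = -1, .mode = CTRL_AUTO,
                           .fan_percent = 40 };
        therm_fini(&t);
        therm_init(&t);            /* prints the restore message */
        return 0;
    }
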
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index c728380d3d6..39f47b950ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -204,6 +204,23 @@ nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
}
int
+nouveau_therm_fan_init(struct nouveau_therm *therm)
+{
+ return 0;
+}
+
+int
+nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+
+ if (suspend)
+ ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+ return 0;
+}
+
+int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
@@ -234,6 +251,9 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
nv_info(therm, "FAN control: %s\n", priv->fan->type);
+	/* read the current speed; it is useful when resuming */
+ priv->fan->percent = nouveau_therm_fan_get(therm);
+
/* attempt to detect a tachometer connection */
ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 15ca64e481f..dd38529262f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -113,6 +113,8 @@ void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
+int nouveau_therm_fan_init(struct nouveau_therm *therm);
+int nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
@@ -122,6 +124,8 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
int nouveau_therm_preinit(struct nouveau_therm *);
+int nouveau_therm_sensor_init(struct nouveau_therm *therm);
+int nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend);
void nouveau_therm_sensor_preinit(struct nouveau_therm *);
void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
enum nouveau_therm_thrs thrs,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index dde746c78c8..b80a33011b9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,6 +180,8 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+ nv_debug(therm, "polling the internal temperature\n");
+
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
NOUVEAU_THERM_THRS_FANBOOST);
@@ -216,6 +218,25 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
+int
+nouveau_therm_sensor_init(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ priv->sensor.program_alarms(therm);
+ return 0;
+}
+
+int
+nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+
+ if (suspend)
+ ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+ return 0;
+}
+
void
nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
index 5d417cc9949..cf8a0e0f8ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -85,3 +85,10 @@ nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
struct nouveau_timer *ptimer = nouveau_timer(obj);
ptimer->alarm(ptimer, nsec, alarm);
}
+
+void
+nouveau_timer_alarm_cancel(void *obj, struct nouveau_alarm *alarm)
+{
+ struct nouveau_timer *ptimer = nouveau_timer(obj);
+ ptimer->alarm_cancel(ptimer, alarm);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 9469b827567..57711ecb566 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -36,6 +36,7 @@ struct nv04_timer_priv {
struct nouveau_timer base;
struct list_head alarms;
spinlock_t lock;
+ u64 suspend_time;
};
static u64
@@ -113,6 +114,25 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
}
static void
+nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
+ struct nouveau_alarm *alarm)
+{
+ struct nv04_timer_priv *priv = (void *)ptimer;
+ unsigned long flags;
+
+ /* avoid deleting an entry while the alarm intr is running */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* delete the alarm from the list */
+ list_del(&alarm->head);
+
+	/* reset the head so that list_empty() returns 1 */
+ INIT_LIST_HEAD(&alarm->head);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
nv04_timer_intr(struct nouveau_subdev *subdev)
{
struct nv04_timer_priv *priv = (void *)subdev;
@@ -146,6 +166,8 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.base.intr = nv04_timer_intr;
priv->base.read = nv04_timer_read;
priv->base.alarm = nv04_timer_alarm;
+ priv->base.alarm_cancel = nv04_timer_alarm_cancel;
+ priv->suspend_time = 0;
INIT_LIST_HEAD(&priv->alarms);
spin_lock_init(&priv->lock);
@@ -164,7 +186,7 @@ nv04_timer_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
struct nv04_timer_priv *priv = (void *)object;
- u32 m = 1, f, n, d;
+ u32 m = 1, f, n, d, lo, hi;
int ret;
ret = nouveau_timer_init(&priv->base);
@@ -221,16 +243,25 @@ nv04_timer_init(struct nouveau_object *object)
d >>= 1;
}
+ /* restore the time before suspend */
+ lo = priv->suspend_time;
+ hi = (priv->suspend_time >> 32);
+
nv_debug(priv, "input frequency : %dHz\n", f);
nv_debug(priv, "input multiplier: %d\n", m);
nv_debug(priv, "numerator : 0x%08x\n", n);
nv_debug(priv, "denominator : 0x%08x\n", d);
nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
+ nv_debug(priv, "time low : 0x%08x\n", lo);
+ nv_debug(priv, "time high : 0x%08x\n", hi);
nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
+ nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
+
return 0;
}
@@ -238,6 +269,8 @@ static int
nv04_timer_fini(struct nouveau_object *object, bool suspend)
{
struct nv04_timer_priv *priv = (void *)object;
+ if (suspend)
+ priv->suspend_time = nv04_timer_read(&priv->base);
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
return nouveau_timer_fini(&priv->base, suspend);
}
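
fini() snapshots the 64-bit PTIMER counter on suspend, and init() writes it back, high word into TIME_1 first and low word into TIME_0 second, so time appears monotonic across a suspend cycle. The split itself:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t suspend_time = 0x00000002deadbeefull;
        uint32_t lo = (uint32_t)suspend_time;
        uint32_t hi = (uint32_t)(suspend_time >> 32);

        printf("TIME_1 <- 0x%08x\n", hi);   /* written first */
        printf("TIME_0 <- 0x%08x\n", lo);
        return 0;
    }
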
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 07dd1fe2d6f..a4aa81a2173 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -174,6 +174,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
case NVDEV_ENGINE_GR : vme = 0x00; break;
case NVDEV_ENGINE_VP : vme = 0x01; break;
case NVDEV_SUBDEV_BAR : vme = 0x06; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : vme = 0x08; break;
case NVDEV_ENGINE_BSP : vme = 0x09; break;
case NVDEV_ENGINE_CRYPT: vme = 0x0a; break;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6a13ffb53bd..d4fbf11360f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,6 +22,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -1034,13 +1035,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
+int
+nouveau_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct nouveau_drm *drm;
+ int ret;
+ struct drm_crtc *crtc;
+ bool active = false;
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ dev = set->crtc->dev;
+
+ /* get a pm reference here */
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_crtc_helper_set_config(set);
+
+ drm = nouveau_drm(dev);
+
+ /* if we get here with no crtcs active then we can drop a reference */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ active = true;
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ /* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+ if (active && !drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ /* if we have no active crtcs, then drop the power ref
+	   we got before */
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
static const struct drm_crtc_funcs nv04_crtc_funcs = {
.save = nv_crtc_save,
.restore = nv_crtc_restore,
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = nouveau_crtc_set_config,
.page_flip = nouveau_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
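
The set_config wrapper keeps one long-lived runtime-PM reference (have_disp_power_ref) for as long as any CRTC is enabled, on top of the transient reference taken around the modeset itself. A toy model of the reference accounting, with global counters standing in for the PM core:

    #include <stdbool.h>
    #include <stdio.h>

    static int pm_refs;
    static bool have_disp_power_ref;

    static void set_config(bool any_crtc_active)
    {
        pm_refs++;                          /* pm_runtime_get_sync() */

        if (any_crtc_active && !have_disp_power_ref) {
            have_disp_power_ref = true;     /* keep the ref we just took */
            return;
        }
        if (!any_crtc_active && have_disp_power_ref) {
            pm_refs--;                      /* drop the display ref */
            have_disp_power_ref = false;
        }
        pm_refs--;                          /* drop our transient ref */
    }

    int main(void)
    {
        set_config(true);    /* enable a head: ref retained */
        set_config(false);   /* disable all heads: refs released */
        printf("refs=%d held=%d\n", pm_refs, have_disp_power_ref); /* 0 0 */
        return 0;
    }
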
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d97f20069d3..dd7d2e18271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -25,8 +25,27 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
-#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
+#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
+
+#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
+#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
+#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
+
+#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
+
+/* result of the optimus caps function */
+#define OPTIMUS_ENABLED (1 << 0)
+#define OPTIMUS_STATUS_MASK (3 << 3)
+#define OPTIMUS_STATUS_OFF (0 << 3)
+#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
+#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
+#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
+#define OPTIMUS_CAPS_MASK (7 << 24)
+#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
+
+#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
+#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
static struct nouveau_dsm_priv {
bool dsm_detected;
@@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
retval |= NOUVEAU_DSM_HAS_MUX;
if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_FN))
+ NOUVEAU_DSM_OPTIMUS_CAPS))
retval |= NOUVEAU_DSM_HAS_OPT;
+ if (retval & NOUVEAU_DSM_HAS_OPT) {
+ uint32_t result;
+ nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
+ &result);
+ dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
+ (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
+ (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
+ (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
+ }
if (retval)
nouveau_dsm_priv.dhandle = dhandle;
@@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void)
if (!nouveau_dsm_priv.optimus_detected)
return;
- nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
- NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
+ 0x3, &result);
+
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
+ NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
+
}
void nouveau_unregister_dsm_handler(void)
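
Probe now queries the Optimus caps function (0x1A) and logs what it found, and the power-down path first toggles the flags function (0x1B) before requesting PS3 power-down through the caps function. A sketch of decoding the caps word, reusing the masks defined in this hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define OPTIMUS_ENABLED          (1u << 0)
    #define OPTIMUS_DYNAMIC_PWR_CAP  (1u << 24)
    #define OPTIMUS_HDA_CODEC_MASK   (2u << 27)

    int main(void)
    {
        uint32_t caps = OPTIMUS_ENABLED | OPTIMUS_DYNAMIC_PWR_CAP;

        printf("optimus: %s%s%s\n",
               (caps & OPTIMUS_ENABLED) ? "enabled" : "disabled",
               (caps & OPTIMUS_DYNAMIC_PWR_CAP) ? ", dynamic power" : "",
               (caps & OPTIMUS_HDA_CODEC_MASK) ? ", hda bios codec" : "");
        return 0;
    }
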
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index af20fba3a1a..755c38d0627 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1265,7 +1265,9 @@ out:
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4da776f344d..c5b36f9e9a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -26,6 +26,8 @@
#include <acpi/button.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct nouveau_encoder *nv_partner;
struct nouveau_i2c_port *i2c;
int type;
+ int ret;
+ enum drm_connector_status conn_status = connector_status_disconnected;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
@@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0)
+ return conn_status;
+
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
@@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
!nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
NV_ERROR(drm, "Detected %s, but failed init\n",
drm_get_connector_name(connector));
- return connector_status_disconnected;
+ conn_status = connector_status_disconnected;
+ goto out;
}
/* Override encoder type for DVI-I based on whether EDID
@@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
nv_encoder = nouveau_connector_of_detect(connector);
if (nv_encoder) {
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
detect_analog:
@@ -311,12 +322,18 @@ detect_analog:
if (helper->detect(encoder, connector) ==
connector_status_connected) {
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
}
- return connector_status_disconnected;
+ out:
+
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
+ return conn_status;
}
static enum drm_connector_status
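
detect() now takes a runtime-PM reference up front and converges every exit on a single out: label so the reference is dropped exactly once. A compact model of that goto-out shape, with stub functions in place of the PM calls:

    #include <stdio.h>

    static int pm_get(void)  { puts("pm get"); return 0; }
    static void pm_put(void) { puts("pm put"); }

    static int detect(int have_edid)
    {
        int status = 0;             /* disconnected */

        if (pm_get() < 0)
            return status;          /* only early exit: no ref held */

        if (have_edid) {
            status = 1;             /* connected */
            goto out;
        }
    out:
        pm_put();                   /* balanced on every other path */
        return status;
    }

    int main(void)
    {
        printf("status=%d\n", detect(1));
        return 0;
    }
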
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a03e75deaca..d2712e6e5d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -107,6 +107,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ NV_ERROR(drm, "framebuffer requires contiguous bo\n");
+ return -EINVAL;
+ }
+
if (nv_device(drm->device)->chipset == 0x50)
nv_fb->r_format |= (tile_flags << 8);
@@ -394,7 +399,7 @@ nouveau_display_suspend(struct drm_device *dev)
nouveau_display_fini(dev);
- NV_INFO(drm, "unpinning framebuffer(s)...\n");
+ NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -416,7 +421,7 @@ nouveau_display_suspend(struct drm_device *dev)
}
void
-nouveau_display_resume(struct drm_device *dev)
+nouveau_display_repin(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
@@ -441,10 +446,12 @@ nouveau_display_resume(struct drm_device *dev)
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
}
+}
- nouveau_fbcon_set_suspend(dev, 0);
- nouveau_fbcon_zfill_all(dev);
-
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
@@ -519,7 +526,8 @@ fail:
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -677,13 +685,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
}
int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
-int
nouveau_display_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *poffset)
@@ -693,7 +694,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = gem->driver_private;
- *poffset = bo->bo.addr_space_offset;
+ *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b6..025c66f8e0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -57,10 +57,12 @@ void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
+void nouveau_display_repin(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
@@ -68,11 +70,10 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
u32 handle, u64 *offset);
-int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
- u32 handle);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+int nouveau_crtc_set_config(struct drm_mode_set *set);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 61972668fd0..8863644024b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -25,7 +25,10 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drmP.h"
+#include "drm_crtc_helper.h"
#include <core/device.h>
#include <core/client.h>
#include <core/gpuobj.h>
@@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
+int nouveau_runtime_pm = -1;
+module_param_named(runpm, nouveau_runtime_pm, int, 0400);
+
static struct drm_driver driver;
static int
@@ -296,6 +303,31 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
+
+static void
+nouveau_get_hdmi_dev(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+
+	/* subfunction 1 is usually the HDMI audio device */
+ drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
+
+ if (!drm->hdmi_device) {
+ DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
+ return;
+ }
+
+ if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
+ DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
+ pci_dev_put(drm->hdmi_device);
+ drm->hdmi_device = NULL;
+ return;
+ }
+}
+
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -314,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
+ nouveau_get_hdmi_dev(dev);
+
/* make sure AGP controller is in a consistent state before we
* (possibly) execute vbios init tables (see nouveau_agp.h)
*/
@@ -388,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+
+ if (nouveau_runtime_pm != 0) {
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_allow(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put(dev->dev);
+ }
return 0;
fail_dispinit:
@@ -409,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ pm_runtime_get_sync(dev->dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
@@ -424,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
+ if (drm->hdmi_device)
+ pci_dev_put(drm->hdmi_device);
nouveau_cli_destroy(&drm->client);
return 0;
}
@@ -450,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev)
int ret;
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "suspending fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
-
- NV_INFO(drm, "suspending display...\n");
+ NV_SUSPEND(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev);
if (ret)
return ret;
}
- NV_INFO(drm, "evicting buffers...\n");
+ NV_SUSPEND(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+ NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
@@ -475,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev)
return ret;
}
- NV_INFO(drm, "suspending client object trees...\n");
+ NV_SUSPEND(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm))
return -ENOMEM;
@@ -487,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev)
goto fail_client;
}
- NV_INFO(drm, "suspending kernel object tree...\n");
+ NV_SUSPEND(drm, "suspending kernel object tree...\n");
ret = nouveau_client_fini(&drm->client.base, true);
if (ret)
goto fail_client;
@@ -501,7 +544,7 @@ fail_client:
}
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_SUSPEND(drm, "resuming display...\n");
nouveau_display_resume(dev);
}
return ret;
@@ -513,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 1);
+
+ nv_suspend_set_printk_level(NV_DBG_INFO);
ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
@@ -523,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -533,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- NV_INFO(drm, "re-enabling device...\n");
+ NV_SUSPEND(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
- NV_INFO(drm, "resuming kernel object tree...\n");
+ NV_SUSPEND(drm, "resuming kernel object tree...\n");
nouveau_client_init(&drm->client.base);
nouveau_agp_init(drm);
- NV_INFO(drm, "resuming client object trees...\n");
+ NV_SUSPEND(drm, "resuming client object trees...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
@@ -553,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_pm_resume(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ NV_SUSPEND(drm, "resuming display...\n");
+ nouveau_display_repin(dev);
}
+
return 0;
}
@@ -565,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
@@ -575,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- return nouveau_do_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ ret = nouveau_do_resume(drm_dev);
+ if (ret) {
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
+ }
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+
+ nouveau_fbcon_zfill_all(drm_dev);
+ nouveau_display_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return 0;
}
static int nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 1);
- return nouveau_do_suspend(drm_dev);
+ ret = nouveau_do_suspend(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
}
static int nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
- return nouveau_do_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ ret = nouveau_do_resume(drm_dev);
+ if (ret) {
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
+ }
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ nouveau_fbcon_zfill_all(drm_dev);
+ nouveau_display_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return 0;
}
@@ -604,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
char name[32], tmpname[TASK_COMM_LEN];
int ret;
+ /* need to bring up power immediately if opening device */
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
if (ret)
- return ret;
+ goto out_suspend;
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
0x1000, &cli->base.vm);
if (ret) {
nouveau_cli_destroy(cli);
- return ret;
+ goto out_suspend;
}
}
@@ -625,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
mutex_lock(&drm->client.mutex);
list_add(&cli->head, &drm->clients);
mutex_unlock(&drm->client.mutex);
- return 0;
+
+out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return ret;
}
static void
@@ -634,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
struct nouveau_cli *cli = nouveau_cli(fpriv);
struct nouveau_drm *drm = nouveau_drm(dev);
+ pm_runtime_get_sync(dev->dev);
+
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);
mutex_unlock(&drm->client.mutex);
+
}
static void
@@ -647,33 +742,52 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_cli *cli = nouveau_cli(fpriv);
nouveau_cli_destroy(cli);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
-static struct drm_ioctl_desc
+static const struct drm_ioctl_desc
nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
+long nouveau_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ long ret;
+ dev = file_priv->minor->dev;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}

static const struct file_operations
nouveau_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = nouveau_drm_ioctl,
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
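
Two related changes in the hunks above: the ioctl table becomes const, and every ioctl that only touches per-client state gains DRM_RENDER_ALLOW, making it callable through the new render nodes (which the DRIVER_RENDER feature bit below opts into). SETPARAM, being master/root-only, is the one entry deliberately left without the flag. With a const table the entry count is a compile-time constant, which is why .num_ioctls moves into the driver struct initializer further down and out of nouveau_drm_init(). A condensed sketch of the two kinds of entries:

    static const struct drm_ioctl_desc example_ioctls[] = {
            /* per-client GEM state only: safe on render nodes */
            DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new,
                              DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
            /* global device state: master/root only, no render access */
            DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam,
                              DRM_UNLOCKED | DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
    };
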
@@ -684,8 +798,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+ DRIVER_USE_AGP |
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
.load = nouveau_drm_load,
.unload = nouveau_drm_unload,
@@ -704,6 +818,7 @@ driver = {
.disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
+ .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +839,7 @@ driver = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
- .dumb_destroy = nouveau_display_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -753,6 +868,90 @@ nouveau_drm_pci_table[] = {
{}
};
+static int nouveau_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ nouveau_switcheroo_optimus_dsm();
+ ret = nouveau_do_suspend(drm_dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ return ret;
+}
+
+static int nouveau_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_device *device = nouveau_dev(drm_dev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = nouveau_do_resume(drm_dev);
+ nouveau_display_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
+ /* do magic */
+ nv_mask(device, 0x88488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ return ret;
+}
+
+static int nouveau_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_crtc *crtc;
+
+ if (nouveau_runtime_pm == 0)
+ return -EBUSY;
+
+ /* are we on an Optimus or v1 _DSM platform? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EBUSY;
+ }
+
+ /* if we have an HDMI audio device, make sure it has a driver loaded */
+ if (drm->hdmi_device) {
+ if (!drm->hdmi_device->driver) {
+ DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
+
static const struct dev_pm_ops nouveau_pm_ops = {
.suspend = nouveau_pmops_suspend,
.resume = nouveau_pmops_resume,
@@ -760,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = {
.thaw = nouveau_pmops_thaw,
.poweroff = nouveau_pmops_freeze,
.restore = nouveau_pmops_resume,
+ .runtime_suspend = nouveau_pmops_runtime_suspend,
+ .runtime_resume = nouveau_pmops_runtime_resume,
+ .runtime_idle = nouveau_pmops_runtime_idle,
};
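
The three runtime callbacks above complete the table: .runtime_suspend/.runtime_resume perform the real power transition (including the switcheroo and PCI state handling), while .runtime_idle only rules on whether suspending is allowed right now. The return-value convention in nouveau_pmops_runtime_idle() is worth spelling out: -EBUSY vetoes the suspend, while calling pm_runtime_autosuspend() and returning a positive value tells the PM core the callback has scheduled the suspend itself, so the idle path never suspends synchronously. Skeleton of that convention (the readiness check is hypothetical):

    static int example_runtime_idle(struct device *dev)
    {
            if (!example_device_is_idle(dev))  /* hypothetical check */
                    return -EBUSY;             /* veto: stay powered */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_autosuspend(dev);       /* schedules the real suspend */
            return 1;                          /* nonzero: core does nothing more */
    }
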
static struct pci_driver
@@ -774,8 +976,6 @@ nouveau_drm_pci_driver = {
static int __init
nouveau_drm_init(void)
{
- driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 41ff7e0d403..994fd6ec373 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+extern int nouveau_runtime_pm;
+
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
@@ -129,6 +131,12 @@ struct nouveau_drm {
/* power management */
struct nouveau_pm *pm;
+
+ /* display power reference */
+ bool have_disp_power_ref;
+
+ struct dev_pm_domain vga_pm_domain;
+ struct pci_dev *hdmi_device;
};

static inline struct nouveau_drm *
@@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev)
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
+#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 830cb7bad92..f32b71238c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = nvbo->bo.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -579,18 +579,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
}
+static inline void
+u_free(void *addr)
+{
+ if (!is_vmalloc_addr(addr))
+ kfree(addr);
+ else
+ vfree(addr);
+}
+
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
- mem = kmalloc(nmemb * size, GFP_KERNEL);
+ size *= nmemb;
+
+ mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!mem)
+ mem = vmalloc(size);
if (!mem)
return ERR_PTR(-ENOMEM);
- if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
- kfree(mem);
+ if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+ u_free(mem);
return ERR_PTR(-EFAULT);
}
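
u_memcpya() now degrades gracefully on large ioctl arrays: try kmalloc() first (quietly, via __GFP_NOWARN), fall back to vmalloc(), and let the new u_free() pick the matching release with is_vmalloc_addr(). Note that the size *= nmemb multiplication is still unchecked here; later kernels package this exact pattern, overflow check included, as kvmalloc_array()/kvfree(). A sketch using those later helpers (not available at the time of this patch):

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/uaccess.h>

    static void *copy_user_array(u64 user, unsigned int nmemb, unsigned int size)
    {
            void *mem;

            /* kvmalloc_array() checks nmemb * size for overflow and falls
             * back to vmalloc() when the slab allocation fails */
            mem = kvmalloc_array(nmemb, size, GFP_KERNEL);
            if (!mem)
                    return ERR_PTR(-ENOMEM);

            if (copy_from_user(mem, u64_to_user_ptr(user), (size_t)nmemb * size)) {
                    kvfree(mem);  /* correct for both slab and vmalloc memory */
                    return ERR_PTR(-EFAULT);
            }
            return mem;
    }
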
@@ -676,7 +689,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
- kfree(reloc);
+ u_free(reloc);
return ret;
}
@@ -738,7 +751,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
- kfree(push);
+ u_free(push);
return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
@@ -849,8 +862,8 @@ out:
nouveau_fence_unref(&fence);
out_prevalid:
- kfree(bo);
- kfree(push);
+ u_free(bo);
+ u_free(push);
out_next:
if (chan->dma.ib_max) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 08214bcdcb1..c1a7e5a73a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -63,7 +63,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
- ret = drm_ioctl(filp, cmd, arg);
+ ret = nouveau_drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index ef2b2906d9e..3b9f2e5463a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -2,5 +2,6 @@
#define __NOUVEAU_IOCTL_H__
long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
+long nouveau_drm_ioctl(struct file *, unsigned int cmd, unsigned long arg);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 25d3495725e..81638d7f2ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
+ return;
+
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -78,8 +81,17 @@ void
nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ bool runtime = false;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
+
+ if (nouveau_runtime_pm == 1)
+ runtime = true;
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+ runtime = true;
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+ vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}
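
nouveau_vga_init() now tells vga_switcheroo up front whether the driver will drive the GPU's power itself: the new third argument of vga_switcheroo_register_client() is true when runtime PM is forced on (nouveau_runtime_pm=1) or when auto-detection (-1) finds an Optimus or v1 _DSM platform, and the set_state() hook above correspondingly ignores manual OFF requests on those systems, since runtime PM owns the power state there. The decision, condensed:

    bool runtime = (nouveau_runtime_pm == 1) ||
                   (nouveau_runtime_pm == -1 &&
                    (nouveau_is_optimus() || nouveau_is_v1_dsm()));

    vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
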
void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b40a36c1b5..f8e66c08b11 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1326,7 +1326,7 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
.cursor_set = nv50_crtc_cursor_set,
.cursor_move = nv50_crtc_cursor_move,
.gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = nouveau_crtc_set_config,
.destroy = nv50_crtc_destroy,
.page_flip = nouveau_crtc_page_flip,
};
@@ -1583,7 +1583,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
load = 340;
ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
- if (ret || load != 7)
+ if (ret || !load)
return connector_status_disconnected;
return connector_status_connected;
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f284..778372b062a 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
omap_dmm_tiler.o \
tcm-sita.o
-# temporary:
-omapdrm-y += omap_gem_helpers.o
-
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 11a5263a5e9..0fd2eb139f6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -331,7 +331,8 @@ static void page_flip_cb(void *arg)
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
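
This is one instance of a tree-wide signature change: .page_flip() grows a fourth parameter carrying the flags userspace passed to the page-flip ioctl, with the async flag (DRM_MODE_PAGE_FLIP_ASYNC, added in this same cycle) being the case the plumbing anticipates. Drivers without an async path, omapdrm included, accept the parameter and ignore it. A stub with the new signature that rejects the async case explicitly (names illustrative):

    static int example_page_flip(struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_pending_vblank_event *event,
                                 uint32_t page_flip_flags)
    {
            /* no async-flip fast path in this driver yet */
            if (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
                    return -EINVAL;

            /* ... queue fb for the next vblank, arm the event ... */
            return 0;
    }
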
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c8..acf667859cb 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg)
goto error;
for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
- memset(map, 0, sizeof(h_adj * sizeof(*map)));
+ memset(map, 0, h_adj * sizeof(*map));
memset(global_map, ' ', (w_adj + 1) * h_adj);
for (i = 0; i < omap_dmm->container_height; i++) {
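
The tiler fix above corrects a classic C slip: sizeof applied to an expression yields the size of the expression's type, it never evaluates the expression. So the old memset(map, 0, sizeof(h_adj * sizeof(*map))) cleared only sizeof(size_t) bytes (4 or 8), leaving the rest of the map stale. A runnable illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char map[128];
            size_t h_adj = 100;

            /* BUG: sizeof of an expression is the size of its type
             * (size_t here), so this zeroes 8 bytes on LP64, not 100 */
            memset(map, 0, sizeof(h_adj * sizeof(map[0])));
            printf("%zu\n", sizeof(h_adj * sizeof(map[0])));  /* prints 8 */

            /* the intended clear */
            memset(map, 0, h_adj * sizeof(map[0]));
            return 0;
    }
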
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a..2603d909f49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static int dev_firstopen(struct drm_device *dev)
-{
- DBG("firstopen: dev=%p", dev);
- return 0;
-}
-
/**
* lastclose - clean up after all DRM clients have exited
* @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
.release = drm_release,
.mmap = omap_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
};
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
.load = dev_load,
.unload = dev_unload,
.open = dev_open,
- .firstopen = dev_firstopen,
.lastclose = dev_lastclose,
.preclose = dev_preclose,
.postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = omap_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce2..30b95b73665 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
-int omap_framebuffer_replace(struct drm_framebuffer *a,
- struct drm_framebuffer *b, void *arg,
- void (*unpin)(void *arg, struct drm_gem_object *bo));
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+int omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8031402e795..f2b8f0668c0 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
}
}
-/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although
- * buffers to unpin are just pushed to the unpin fifo so that the
- * caller can defer unpin until vblank.
- *
- * Note if this fails (ie. something went very wrong!), all buffers are
- * unpinned, and the caller disables the overlay. We could have tried
- * to revert back to the previous set of pinned buffers but if things are
- * hosed there is no guarantee that would succeed.
- */
-int omap_framebuffer_replace(struct drm_framebuffer *a,
- struct drm_framebuffer *b, void *arg,
- void (*unpin)(void *arg, struct drm_gem_object *bo))
+/* pin, prepare for scanout: */
+int omap_framebuffer_pin(struct drm_framebuffer *fb)
{
- int ret = 0, i, na, nb;
- struct omap_framebuffer *ofba = to_omap_framebuffer(a);
- struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
- uint32_t pinned_mask = 0;
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
- na = a ? drm_format_num_planes(a->pixel_format) : 0;
- nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+ if (ret)
+ goto fail;
+ omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+ }
- for (i = 0; i < max(na, nb); i++) {
- struct plane *pa, *pb;
+ return 0;
- pa = (i < na) ? &ofba->planes[i] : NULL;
- pb = (i < nb) ? &ofbb->planes[i] : NULL;
+fail:
+ for (i--; i >= 0; i--) {
+ struct plane *plane = &omap_fb->planes[i];
+ omap_gem_put_paddr(plane->bo);
+ plane->paddr = 0;
+ }
- if (pa)
- unpin(arg, pa->bo);
+ return ret;
+}
- if (pb && !ret) {
- ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
- if (!ret) {
- omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
- pinned_mask |= (1 << i);
- }
- }
- }
+/* unpin, no longer being scanned out: */
+int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
- if (ret) {
- /* something went wrong.. unpin what has been pinned */
- for (i = 0; i < nb; i++) {
- if (pinned_mask & (1 << i)) {
- struct plane *pb = &ofba->planes[i];
- unpin(arg, pb->bo);
- }
- }
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ ret = omap_gem_put_paddr(plane->bo);
+ if (ret)
+ goto fail;
+ plane->paddr = 0;
}
+ return 0;
+
+fail:
return ret;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9..533f6ebec53 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
* mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
* we actually want CMA memory for it all anyways..
*/
- pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ pages = drm_gem_get_pages(obj, GFP_KERNEL);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
return 0;
free_pages:
- _drm_gem_put_pages(obj, pages, true, false);
+ drm_gem_put_pages(obj, pages, true, false);
return ret;
}
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
kfree(omap_obj->addrs);
omap_obj->addrs = NULL;
- _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
}
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ int ret;
+ size_t size;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (!obj->map_list.map) {
- /* Make it mmapable */
- size_t size = omap_gem_mmap_size(obj);
- int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
- if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
- return 0;
- }
+ /* Make it mmapable */
+ size = omap_gem_mmap_size(obj);
+ ret = drm_gem_create_mmap_offset_size(obj, size);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
}
- return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+ return drm_vma_node_offset_addr(&obj->vma_node);
}
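
This hunk is omapdrm's side of the switch from the old map_list/hash-table offset bookkeeping to the DRM core's new VMA offset manager: drm_gem_create_mmap_offset_size() reserves a node in the manager (replacing the open-coded _drm_gem_create_mmap_offset_size() deleted further down), and the fake offset handed to userspace is now read with drm_vma_node_offset_addr(). The consumer side, condensed:

    #include <drm/drm_vma_manager.h>

    /* after drm_gem_create_mmap_offset_size(obj, size) has succeeded: */
    uint64_t offset = drm_vma_node_offset_addr(&obj->vma_node);

    /* userspace then maps the object through the DRM fd:
     *   mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd, offset);
     */
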
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}

/**
- * omap_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* omap_gem_dumb_map - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* this means the object is still pinned.. which really should
* not happen. I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height;
}
+ ret = 0;
if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
- ret = drm_gem_private_object_init(dev, obj, size);
+ drm_gem_private_object_init(dev, obj, size);
else
ret = drm_gem_object_init(dev, obj, size);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644
index f9eb679eb79..00000000000
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/* temporary copy of drm_gem_{get,put}_pages() until the
- * "drm/gem: add functions to get/put pages" patch is merged..
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/shmem_fs.h>
-
-#include <drm/drmP.h>
-
-/**
- * drm_gem_get_pages - helper to allocate backing pages for a GEM object
- * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
- */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
-{
- struct inode *inode;
- struct address_space *mapping;
- struct page *p, **pages;
- int i, npages;
-
- /* This is the shared memory object that backs the GEM resource */
- inode = file_inode(obj->filp);
- mapping = inode->i_mapping;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
-
- /* There is a hypothetical issue w/ drivers that require
- * buffer memory in the low 4GB.. if the pages are un-
- * pinned, and swapped out, they can end up swapped back
- * in above 4GB. If pages are already in memory, then
- * shmem_read_mapping_page_gfp will ignore the gfpmask,
- * even if the already in-memory page disobeys the mask.
- *
- * It is only a theoretical issue today, because none of
- * the devices with this limitation can be populated with
- * enough memory to trigger the issue. But this BUG_ON()
- * is here as a reminder in case the problem with
- * shmem_read_mapping_page_gfp() isn't solved by the time
- * it does become a real issue.
- *
- * See this thread: http://lkml.org/lkml/2011/7/11/238
- */
- BUG_ON((gfpmask & __GFP_DMA32) &&
- (page_to_pfn(p) >= 0x00100000UL));
- }
-
- return pages;
-
-fail:
- while (i--)
- page_cache_release(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-/**
- * drm_gem_put_pages - helper to free backing pages for a GEM object
- * @obj: obj in question
- * @pages: pages to free
- */
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
- bool dirty, bool accessed)
-{
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- if (accessed)
- mark_page_accessed(pages[i]);
-
- /* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
- }
-
- drm_free_large(pages);
-}
-
-int
-_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret = 0;
-
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- size / PAGE_SIZE, 0, 0);
-
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
-
- return ret;
-}
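
The whole helper file can go because its contents were merged into the DRM core earlier in this series; the omap_gem.c hunks above switch the callers from the underscore-prefixed private copies to drm_gem_get_pages()/drm_gem_put_pages(). Minimal usage as of this kernel (the gfp mask was still an explicit argument at this point):

    struct page **pages = drm_gem_get_pages(obj, GFP_KERNEL);
    if (IS_ERR(pages))
            return PTR_ERR(pages);

    /* ... pin, map or scan out the backing pages ... */

    drm_gem_put_pages(obj, pages, true, false);  /* dirty = true, accessed = false */
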
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 8d225d7ff4e..046d5e660c0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,7 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
struct omap_drm_irq error_irq;
- /* set of bo's pending unpin until next post_apply() */
- DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+ /* for deferring bo unpin's until next post_apply(): */
+ struct drm_flip_work unpin_work;
// XXX maybe get rid of this and handle vblank in crtc too?
struct callback apply_done_cb;
};
-static void unpin(void *arg, struct drm_gem_object *bo)
+static void unpin_worker(struct drm_flip_work *work, void *val)
{
- struct drm_plane *plane = arg;
- struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_plane *omap_plane =
+ container_of(work, struct omap_plane, unpin_work);
+ struct drm_device *dev = omap_plane->base.dev;
- if (kfifo_put(&omap_plane->unpin_fifo,
- (const struct drm_gem_object **)&bo)) {
- /* also hold a ref so it isn't free'd while pinned */
- drm_gem_object_reference(bo);
- } else {
- dev_err(plane->dev->dev, "unpin fifo full!\n");
- omap_gem_put_paddr(bo);
- }
+ omap_framebuffer_unpin(val);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
}

/* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
if (pinned_fb != fb) {
- int ret;
+ int ret = 0;
DBG("%p -> %p", pinned_fb, fb);
- if (fb)
+ if (fb) {
drm_framebuffer_reference(fb);
-
- ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+ ret = omap_framebuffer_pin(fb);
+ }
if (pinned_fb)
- drm_framebuffer_unreference(pinned_fb);
+ drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
- if (fb)
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_unreference(fb);
omap_plane->pinned_fb = NULL;
return ret;
}
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
struct omap_plane *omap_plane =
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
+ struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_overlay_info *info = &omap_plane->info;
- struct drm_gem_object *bo = NULL;
struct callback cb;
cb = omap_plane->apply_done_cb;
omap_plane->apply_done_cb.fxn = NULL;
- while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
- omap_gem_put_paddr(bo);
- drm_gem_object_unreference_unlocked(bo);
- }
+ drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
if (cb.fxn)
cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
omap_plane_disable(plane);
drm_plane_cleanup(plane);
- WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
- kfifo_free(&omap_plane->unpin_fifo);
+ drm_flip_work_cleanup(&omap_plane->unpin_work);
kfree(omap_plane);
}
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
goto fail;
- ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+ ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+ "unpin", unpin_worker);
if (ret) {
dev_err(dev->dev, "could not allocate unpin FIFO\n");
goto fail;
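
omap_plane moves from a hand-rolled kfifo of pinned buffers to the new drm_flip_work helper introduced in this pull. The lifecycle is: init with a callback, queue() objects from the hot path, commit() once deferral ends (here, post_apply) to run the callback for each queued object on a workqueue, cleanup() at teardown. A skeleton under those assumptions (plane/priv names illustrative; the header's canonical location is include/drm/drm_flip_work.h):

    #include <drm/drm_flip_work.h>

    static void release_fb(struct drm_flip_work *work, void *val)
    {
            /* runs on the workqueue passed to drm_flip_work_commit() */
            drm_framebuffer_unreference(val);
    }

    /* setup (at this kernel, init still allocates a fixed-size fifo): */
    ret = drm_flip_work_init(&plane->unpin_work, 16, "unpin", release_fb);

    /* producer side, e.g. when a new fb replaces the old one: */
    drm_flip_work_queue(&plane->unpin_work, old_fb);

    /* once the hardware is done with old_fb (vblank / post_apply): */
    drm_flip_work_commit(&plane->unpin_work, priv->wq);

    /* teardown: */
    drm_flip_work_cleanup(&plane->unpin_work);
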
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a660..514118ae72d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.poll = drm_poll,
- .fasync = drm_fasync,
.mmap = qxl_mmap,
};
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
- .dumb_destroy = qxl_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 7e96f4f1173..f7c9adde46a 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -328,7 +328,7 @@ struct qxl_device {
/* forward declaration for QXL_INFO_IO */
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
-extern struct drm_ioctl_desc qxl_ioctls[];
+extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -405,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
bool discardable, bool kernel,
struct qxl_surface *surf,
struct drm_gem_object **obj);
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr);
-void qxl_gem_object_unpin(struct drm_gem_object *obj);
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
@@ -427,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f..d34bb4130ff 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 25e1777fb0a..1648e4125af 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
return 0;
}
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr)
-{
- struct qxl_bo *qobj = obj->driver_private;
- int r;
-
- r = qxl_bo_reserve(qobj, false);
- if (unlikely(r != 0))
- return r;
- r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
- qxl_bo_unreserve(qobj);
- return r;
-}
-
-void qxl_gem_object_unpin(struct drm_gem_object *obj)
-{
- struct qxl_bo *qobj = obj->driver_private;
- int r;
-
- r = qxl_bo_reserve(qobj, false);
- if (likely(r == 0)) {
- qxl_bo_unpin(qobj);
- qxl_bo_unreserve(qobj);
- }
-}
-
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6de33563d6f..7b95c75e962 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -433,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
return ret;
}
-struct drm_ioctl_desc qxl_ioctls[] = {
+const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index aa161cddd87..8691c76c5ef 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -98,7 +98,6 @@ int qxl_bo_create(struct qxl_device *qdev,
kfree(bo);
return r;
}
- bo->gem_base.driver_private = NULL;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 8cb6167038e..d458a140c02 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b61449e52cd..0109a9644cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -88,7 +88,7 @@ qxl_release_free(struct qxl_device *qdev,
list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- entry->tv.bo->addr_space_offset
+ drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 1dfd84cda2a..037786d7c1d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -212,7 +212,9 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct qxl_bo *qbo = to_qxl_bo(bo);
+
+ return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
}
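
qxl's verify_access() stops approving every caller: drm_vma_node_verify_access() only admits files that were previously granted access to the object's offset node (the core grants it with drm_vma_node_allow() when a handle to the object is created for that file), so a client can no longer mmap another client's buffer just by guessing its fake offset. The hunk above in isolation:

    static int example_verify_access(struct ttm_buffer_object *bo, struct file *filp)
    {
            struct qxl_bo *qbo = to_qxl_bo(bo);

            /* -EACCES unless filp was drm_vma_node_allow()ed on this node */
            return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
    }
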
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a..c451257f08f 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
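
drm_order(), DRM's private log2 helper, is being replaced by the generic order_base_2() from <linux/log2.h>; for n >= 1 it returns ceil(log2(n)), the exponent of the next power of two, which is exactly what the size_l2qw ring field wants. A compile-time sanity check of the values involved:

    #include <linux/bug.h>
    #include <linux/log2.h>

    static inline void order_base_2_examples(void)
    {
            /* order_base_2(n) == ilog2(roundup_pow_of_two(n)) */
            BUILD_BUG_ON(order_base_2(1) != 0);
            BUILD_BUG_ON(order_base_2(8) != 3);
            BUILD_BUG_ON(order_base_2(9) != 4);  /* rounds up, unlike plain ilog2() */
    }
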
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 472c38fe123..5bd307cd8da 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = r128_compat_ioctl,
#endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.load = r128_driver_load,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5..56eb5e3f543 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
-extern struct drm_ioctl_desc r128_ioctls[];
+extern const struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9..01dd9aef9f0 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
r128_do_cleanup_cce(dev);
}
-struct drm_ioctl_desc r128_ioctls[] = {
+const struct drm_ioctl_desc r128_ioctls[] = {
DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c3df52c1a60..306364a1ecd 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,14 +72,32 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
- trinity_smc.o ni_dpm.o si_smc.o si_dpm.o
+ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
+ ci_dpm.o dce6_afmt.o
+
+# add async DMA block
+radeon-y += \
+ r600_dma.o \
+ rv770_dma.o \
+ evergreen_dma.o \
+ ni_dma.o \
+ si_dma.o \
+ cik_sdma.o \
+
+# add UVD block
+radeon-y += \
+ radeon_uvd.o \
+ uvd_v1_0.o \
+ uvd_v2_2.o \
+ uvd_v3_1.o \
+ uvd_v4_2.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 16b120c3f14..af10f8571d8 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
}ATOM_POWERPLAY_INFO_V3;
-/* New PPlib */
-/**************************************************************************/
-typedef struct _ATOM_PPLIB_THERMALCONTROLLER
-
-{
- UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
- UCHAR ucI2cLine; // as interpreted by DAL I2C
- UCHAR ucI2cAddress;
- UCHAR ucFanParameters; // Fan Control Parameters.
- UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
- UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
- UCHAR ucReserved; // ----
- UCHAR ucFlags; // to be defined
-} ATOM_PPLIB_THERMALCONTROLLER;
-
-#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
-
-#define ATOM_PP_THERMALCONTROLLER_NONE 0
-#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_LM64 5
-#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
-#define ATOM_PP_THERMALCONTROLLER_RV770 8
-#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
-#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
-#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
-#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
-#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
-#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
-#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
-#define ATOM_PP_THERMALCONTROLLER_LM96163 17
-#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
-
-// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
-// We probably should reserve the bit 0x80 for this use.
-// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
-// The driver can pick the correct internal controller based on the ASIC.
-
-#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
-#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
-
-typedef struct _ATOM_PPLIB_STATE
-{
- UCHAR ucNonClockStateIndex;
- UCHAR ucClockStateIndices[1]; // variable-sized
-} ATOM_PPLIB_STATE;
-
-
-typedef struct _ATOM_PPLIB_FANTABLE
-{
- UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
- UCHAR ucTHyst; // Temperature hysteresis. Integer.
- USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
- USHORT usTMed; // The middle temperature where we change slopes.
- USHORT usTHigh; // The high point above TMed for adjusting the second slope.
- USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
- USHORT usPWMMed; // The PWM value (in percent) at TMed.
- USHORT usPWMHigh; // The PWM value at THigh.
-} ATOM_PPLIB_FANTABLE;
-
-typedef struct _ATOM_PPLIB_FANTABLE2
-{
- ATOM_PPLIB_FANTABLE basicTable;
- USHORT usTMax; // The max temperature
-} ATOM_PPLIB_FANTABLE2;
-
-typedef struct _ATOM_PPLIB_EXTENDEDHEADER
-{
- USHORT usSize;
- ULONG ulMaxEngineClock; // For Overdrive.
- ULONG ulMaxMemoryClock; // For Overdrive.
- // Add extra system parameters here, always adjust size to include all fields.
- USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
- USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
- USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
- USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
-} ATOM_PPLIB_EXTENDEDHEADER;
-
-//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
-#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
-#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
-#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
-#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
-#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
-#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
-#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
-#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
-#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
-#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
-#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
-#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
-#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
-#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
-#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
-#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
-#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
-#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
-#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
-#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
-#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
-#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- UCHAR ucDataRevision;
-
- UCHAR ucNumStates;
- UCHAR ucStateEntrySize;
- UCHAR ucClockInfoSize;
- UCHAR ucNonClockSize;
-
- // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
- USHORT usStateArrayOffset;
-
- // offset from start of this table to array of ASIC-specific structures,
- // currently ATOM_PPLIB_CLOCK_INFO.
- USHORT usClockInfoArrayOffset;
-
- // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
- USHORT usNonClockInfoArrayOffset;
-
- USHORT usBackbiasTime; // in microseconds
- USHORT usVoltageTime; // in microseconds
- USHORT usTableSize; //the size of this structure, or the extended structure
-
- ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
-
- ATOM_PPLIB_THERMALCONTROLLER sThermalController;
-
- USHORT usBootClockInfoOffset;
- USHORT usBootNonClockInfoOffset;
-
-} ATOM_PPLIB_POWERPLAYTABLE;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
-{
- ATOM_PPLIB_POWERPLAYTABLE basicTable;
- UCHAR ucNumCustomThermalPolicy;
- USHORT usCustomThermalPolicyArrayOffset;
-}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
-{
- ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
- USHORT usFormatID; // To be used ONLY by PPGen.
- USHORT usFanTableOffset;
- USHORT usExtendendedHeaderOffset;
-} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
-{
- ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
- ULONG ulGoldenPPID; // PPGen use only
- ULONG ulGoldenRevision; // PPGen use only
- USHORT usVddcDependencyOnSCLKOffset;
- USHORT usVddciDependencyOnMCLKOffset;
- USHORT usVddcDependencyOnMCLKOffset;
- USHORT usMaxClockVoltageOnDCOffset;
- USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
- USHORT usMvddDependencyOnMCLKOffset;
-} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
-{
- ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
- ULONG ulTDPLimit;
- ULONG ulNearTDPLimit;
- ULONG ulSQRampingThreshold;
- USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
- ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
- USHORT usTDPODLimit;
- USHORT usLoadLineSlope; // in milliOhms * 100
-} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
-// 2, 4, 6, 7 are reserved
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
-#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
-#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
-#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
-#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
-#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
-#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
-#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
-#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
-#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
-
-//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
-#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
-#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
-
-// 0 is 2.5Gb/s, 1 is 5Gb/s
-#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
-#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
-
-// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
-
-// lookup into reduced refresh-rate table
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
-
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
-// 2-15 TBD as needed.
-
-#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
-#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
-
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
-
-#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-
-//memory related flags
-#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
-
-//M3 Arb //2bits, current 3 sets of parameters in total
-#define ATOM_PPLIB_M3ARB_MASK 0x00060000
-#define ATOM_PPLIB_M3ARB_SHIFT 17
-
-#define ATOM_PPLIB_ENABLE_DRR 0x00080000
-
-// remaining 16 bits are reserved
-typedef struct _ATOM_PPLIB_THERMAL_STATE
-{
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucThermalAction;
-}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-typedef struct _ATOM_PPLIB_NONCLOCK_INFO
-{
- USHORT usClassification;
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- ULONG ulCapsAndSettings;
- UCHAR ucRequiredPower;
- USHORT usClassification2;
- ULONG ulVCLK;
- ULONG ulDCLK;
- UCHAR ucUnused[5];
-} ATOM_PPLIB_NONCLOCK_INFO;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usUnused1;
- USHORT usUnused2;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_R600_CLOCK_INFO;
-
-// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
-#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
-#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
-#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
-#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
-
-typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- USHORT usUnused;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- UCHAR ucPCIEGen;
- UCHAR ucUnused1;
-
- ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
-
-} ATOM_PPLIB_SI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- UCHAR ucPCIEGen;
- USHORT usPCIELane;
-} ATOM_PPLIB_CI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
-
-{
- USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
- UCHAR ucLowEngineClockHigh;
- USHORT usHighEngineClockLow; // High Engine clock in MHz.
- UCHAR ucHighEngineClockHigh;
- USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
- UCHAR ucMemoryClockHigh; // Currentyl unused.
- UCHAR ucPadding; // For proper alignment and size.
- USHORT usVDDC; // For the 780, use: None, Low, High, Variable
- UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
- USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
-} ATOM_PPLIB_RS780_CLOCK_INFO;
-
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
-
-#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
-#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
-#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
-
-typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
- USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10 kHz.
- UCHAR ucEngineClockHigh; //clock frequency >> 16.
- UCHAR vddcIndex; //2-bit vddc index;
- USHORT tdpLimit;
- //please initialize to 0
- USHORT rsv1;
- //please initialize to 0s
- ULONG rsv2[2];
-}ATOM_PPLIB_SUMO_CLOCK_INFO;
-
-
-
-typedef struct _ATOM_PPLIB_STATE_V2
-{
- //number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
- UCHAR ucNumDPMLevels;
-
- //an index into the array of nonClockInfos
- UCHAR nonClockInfoIndex;
- /**
- * Driver will read the first ucNumDPMLevels in this array
- */
- UCHAR clockInfoIndex[1];
-} ATOM_PPLIB_STATE_V2;
-
-typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
-}StateArray;
-
-
-typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[1];
-}ClockInfoArray;
-
-typedef struct _NonClockInfoArray{
-
- //how many non-clock levels we have; normally the same as the number of states
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
-}NonClockInfoArray;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
-{
- USHORT usClockLow;
- UCHAR ucClockHigh;
- USHORT usVoltage;
-}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
-{
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
- USHORT usVddc;
- USHORT usVddci;
-}ATOM_PPLIB_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Record
-{
- USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value.
- ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value.
-}ATOM_PPLIB_CAC_Leakage_Record;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_CAC_Leakage_Table;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
-{
- USHORT usVoltage;
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
-}ATOM_PPLIB_PhaseSheddingLimits_Record;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_PhaseSheddingLimits_Table;
-
-typedef struct _VCEClockInfo{
- USHORT usEVClkLow;
- UCHAR ucEVClkHigh;
- USHORT usECClkLow;
- UCHAR ucECClkHigh;
-}VCEClockInfo;
-
-typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
-}VCEClockInfoArray;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucVCEClockInfoIndex;
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_VCE_State_Record
-{
- UCHAR ucVCEClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_VCE_State_Record;
-
-typedef struct _ATOM_PPLIB_VCE_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
-}ATOM_PPLIB_VCE_State_Table;
-
-
-typedef struct _ATOM_PPLIB_VCE_Table
-{
- UCHAR revid;
-// VCEClockInfoArray array;
-// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_VCE_State_Table states;
-}ATOM_PPLIB_VCE_Table;
-
-
-typedef struct _UVDClockInfo{
- USHORT usVClkLow;
- UCHAR ucVClkHigh;
- USHORT usDClkLow;
- UCHAR ucDClkHigh;
-}UVDClockInfo;
-
-typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
-}UVDClockInfoArray;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucUVDClockInfoIndex;
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_UVD_State_Record
-{
- UCHAR ucUVDClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_UVD_State_Record;
-
-typedef struct _ATOM_PPLIB_UVD_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_State_Record entries[1];
-}ATOM_PPLIB_UVD_State_Table;
-
-
-typedef struct _ATOM_PPLIB_UVD_Table
-{
- UCHAR revid;
-// UVDClockInfoArray array;
-// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_UVD_State_Table states;
-}ATOM_PPLIB_UVD_Table;
-
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
-{
- USHORT usVoltage;
- USHORT usSAMClockLow;
- UCHAR ucSAMClockHigh;
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_SAMU_Table
-{
- UCHAR revid;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_SAMU_Table;
-
-#define ATOM_PPM_A_A 1
-#define ATOM_PPM_A_I 2
-typedef struct _ATOM_PPLIB_PPM_Table
-{
- UCHAR ucRevId;
- UCHAR ucPpmDesign; //A+I or A+A
- USHORT usCpuCoreNumber;
- ULONG ulPlatformTDP;
- ULONG ulSmallACPlatformTDP;
- ULONG ulPlatformTDC;
- ULONG ulSmallACPlatformTDC;
- ULONG ulApuTDP;
- ULONG ulDGpuTDP;
- ULONG ulDGpuUlvPower;
- ULONG ulTjmax;
-} ATOM_PPLIB_PPM_Table;
-
-/**************************************************************************/
-
// The following definitions are for compatibility issues in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
@@ -8485,3 +7873,6 @@ typedef struct {
#endif /* _ATOMBIOS_H */
+
+#include "pptable.h"
+
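The ATOM_PPLIB_* typedefs removed above are not dropped from the driver: the new "pptable.h" include pulls the same power-play table layouts in from a shared header. Two patterns recur in those tables: 24-bit clocks split into a USHORT low part and a UCHAR high part (in 10 kHz units, per the SUMO comment), and entry arrays whose stride is only known at run time via ucEntrySize. A minimal sketch of reading one entry, assuming the struct names above (pp_get_engine_clock() itself is hypothetical):

    /* hypothetical helper; ClockInfoArray/ATOM_PPLIB_R600_CLOCK_INFO as above */
    static u32 pp_get_engine_clock(const ClockInfoArray *ca, u8 index)
    {
            /* entries are packed back to back with a run-time stride */
            const ATOM_PPLIB_R600_CLOCK_INFO *info =
                    (const ATOM_PPLIB_R600_CLOCK_INFO *)
                    (ca->clockInfo + index * ca->ucEntrySize);

            /* recombine the split clock; the result is in 10 kHz units */
            return le16_to_cpu(info->usEngineClockLow) |
                   ((u32)info->ucEngineClockHigh << 16);
    }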
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b9d3b43f19c..bf87f6d435f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* disable the GRPH */
+ if (ASIC_IS_DCE4(rdev))
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+ else if (ASIC_IS_AVIVO(rdev))
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 32501f6ec99..00885417fff 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -50,7 +50,7 @@ static char *pre_emph_names[] = {
* or from atom. Note that atom operates on
* dw units.
*/
-static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
@@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- radeon_copy_swap(base, send, send_bytes, true);
+ radeon_atom_copy_swap(base, send, send_bytes, true);
args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
@@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- radeon_copy_swap(recv, base + 16, recv_bytes, false);
+ radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
@@ -585,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+ DRM_DEBUG_KMS("link status %6ph\n", link_status);
return true;
}
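The last hunk above is purely cosmetic: the kernel's %ph printk extension prints a small buffer as hex bytes, and the byte count can be embedded in the specifier ("%6ph") instead of being passed as a field-width argument ("%*ph", 6). A short sketch with made-up values:

    u8 link_status[6] = { 0x00, 0x01, 0x77, 0x00, 0x00, 0x00 }; /* hypothetical data */
    DRM_DEBUG_KMS("link status %6ph\n", link_status); /* "link status 00 01 77 00 00 00" */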
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 092275d53d4..dfac7965ea2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338df708..deaf98cdca3 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,10 +27,12 @@
#include "radeon.h"
#include "atom.h"
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
-#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_READ 255
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
@@ -50,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
- DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- memcpy(&out, buf, num);
+ args.ucRegIndex = buf[0];
+ if (num > 1)
+ memcpy(&out, &buf[1], num - 1);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
+ args.ucRegIndex = 0;
+ args.lpI2CDataOut = 0;
}
+ args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
- args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
@@ -77,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
}
if (!(flags & HW_I2C_WRITE))
- memcpy(buf, base, num);
+ radeon_atom_copy_swap(buf, base, num, false);
return 0;
}
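The write path above changes meaning as well as size: buf[0] is now consumed as ucRegIndex and only the remaining bytes travel in the 16-bit lpI2CDataOut payload, which is why the limit moves from 2 to 3 bytes and why the read path clears both fields explicitly. A sketch of the caller-side layout this implies (slave address and values are hypothetical):

    /* write 0xAB, 0xCD starting at register 0x10 of some slave */
    u8 buf[3] = { 0x10, 0xAB, 0xCD }; /* buf[0] = register index, buf[1..2] = data */
    /* then: radeon_process_i2c_ch(chan, slave_addr, HW_I2C_WRITE, buf, 3); */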
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 9953e1fbc46..084e69414fd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2699,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
else
rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 19a0114d2e3..98d009e154b 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -317,58 +317,4 @@ const u32 cayman_default_state[] =
0x00000010, /* */
};
-const u32 cayman_vs[] =
-{
- 0x00000004,
- 0x80400400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15000688,
- 0x00000000,
- 0x88000000,
- 0x04000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x00020000,
-#else
- 0x00000000,
-#endif
- 0x00000000,
- 0x04000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 cayman_ps[] =
-{
- 0x00000004,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95000688,
- 0x00000000,
- 0x88000000,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
-const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 00000000000..3cce533397c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5243 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "ci_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+ { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
+ { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+ { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
+ { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
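+/*
+ * Each row above is { offset, mask, shift, value, type }: the type selects
+ * the register access path (all DIDT indirect space here) and the lone
+ * 0xFFFFFFFF offset is the end-of-table sentinel that
+ * ci_program_pt_config_registers() below stops on.
+ */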
+
+extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+ u32 arb_freq_src, u32 arb_freq_dest);
+extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
+extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
+extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table);
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp);
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+
+static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
+{
+ struct ci_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ switch (rdev->pdev->device) {
+ case 0x6650:
+ case 0x6658:
+ case 0x665C:
+ default:
+ pi->powertune_defaults = &defaults_bonaire_xt;
+ break;
+ case 0x6651:
+ case 0x665D:
+ pi->powertune_defaults = &defaults_bonaire_pro;
+ break;
+ case 0x6640:
+ pi->powertune_defaults = &defaults_saturn_xt;
+ break;
+ case 0x6641:
+ pi->powertune_defaults = &defaults_saturn_pro;
+ break;
+ }
+
+ pi->dte_tj_offset = 0;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = false;
+ pi->caps_sq_ramping = false;
+ pi->caps_db_ramping = false;
+ pi->caps_td_ramping = false;
+ pi->caps_tcp_ramping = false;
+
+ if (pi->caps_power_containment) {
+ pi->caps_cac = true;
+ pi->enable_bapm_feature = true;
+ pi->enable_tdc_limit_feature = true;
+ pi->enable_pkg_pwr_tracking_feature = true;
+ }
+}
+
+static u8 ci_convert_to_vid(u16 vddc)
+{
+ return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
+
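+/*
+ * ci_convert_to_vid() above looks like the SVI VID encoding: with
+ * VOLTAGE_SCALE = 4 the formula reduces to (1550 - vddc) / 6.25, i.e.
+ * 6.25 mV steps down from 1.55 V, assuming vddc is given in mV.
+ * Example: 1100 mV -> (6200 - 4400) / 25 = VID 72.
+ */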
+static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+ u32 i;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+ return -EINVAL;
+
+ for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+ hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+ hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+ } else {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+ hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+ }
+ }
+ return 0;
+}
+
+static int ci_populate_vddc_vid(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *vid = pi->smc_powertune_table.VddCVid;
+ u32 i;
+
+ if (pi->vddc_voltage_table.count > 8)
+ return -EINVAL;
+
+ for (i = 0; i < pi->vddc_voltage_table.count; i++)
+ vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_populate_svi_load_line(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+ pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+ pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+ pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+ pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int ci_populate_tdc_limit(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ u16 tdc_limit;
+
+ tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+ pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+ pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ pt_defaults->tdc_vddc_throttle_release_limit_perc;
+ pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+ return 0;
+}
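+/*
+ * The "* 256" above (and the similar TDP scaling later in this file) reads
+ * like U8.8 fixed point, i.e. the SMC takes its power limits with 8
+ * fractional bits.  This is an inference from usage, not from SMU
+ * documentation.
+ */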
+
+static int ci_populate_dw8(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable) +
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+ (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+ pi->sram_end);
+ if (ret)
+ return -EINVAL;
+ else
+ pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
+ }
+
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+ pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
+ u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+
+ hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+ lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+ pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+ pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+ return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
+ int i, j, k;
+ const u16 *def1;
+ const u16 *def2;
+
+ dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+ dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+ dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+ dpm_table->GpuTjMax =
+ (u8)(pi->thermal_temp_setting.temperature_high / 1000);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+ } else {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+ }
+
+ dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+ def1 = pt_defaults->bapmti_r;
+ def2 = pt_defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_pm_base(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 pm_fuse_table_offset;
+ int ret;
+
+ if (pi->caps_power_containment) {
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, pi->sram_end);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_vid_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_vddc_vid(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_svi_load_line(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_tdc_limit(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_dw8(rdev);
+ if (ret)
+ return ret;
+ ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
+ (u8 *)&pi->smc_powertune_table,
+ sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
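+/*
+ * ci_do_enable_didt() only toggles DIDT_CTRL_EN in each block's CTRL0
+ * register (SQ/DB/TD/TCP); the thresholds themselves come from the
+ * didt_config_ci table that ci_enable_didt() below programs first.
+ */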
+
+static int ci_program_pt_config_registers(struct radeon_device *rdev,
+ const struct ci_pt_config_reg *cac_config_regs)
+{
+ const struct ci_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ cache = 0;
+ }
+ config_regs++;
+ }
+ return 0;
+}
+
+static int ci_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(rdev, didt_config_ci);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (enable) {
+ pi->power_containment_features = 0;
+ if (pi->caps_power_containment) {
+ if (pi->enable_bapm_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+ }
+
+ if (pi->enable_tdc_limit_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+ }
+
+ if (pi->enable_pkg_pwr_tracking_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ } else {
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 default_pwr_limit =
+ (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+ ci_set_power_limit(rdev, default_pwr_limit);
+ }
+ }
+ }
+ } else {
+ if (pi->caps_power_containment && pi->power_containment_features) {
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
+ pi->power_containment_features = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ pi->cac_enabled = false;
+ } else {
+ pi->cac_enabled = true;
+ }
+ } else if (pi->cac_enabled) {
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_power_control_set_level(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ s32 adjust_percent;
+ s32 target_tdp;
+ int ret = 0;
+ bool adjust_polarity = false; /* ??? */
+
+ if (pi->caps_power_containment &&
+ (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+ adjust_percent = adjust_polarity ?
+ rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
+ target_tdp = ((100 + adjust_percent) *
+ (s32)cac_tdp_table->configurable_tdp) / 100;
+ target_tdp *= 256;
+
+ ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
+ }
+
+ return ret;
+}
+
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ ci_update_uvd_dpm(rdev, gate);
+}
+
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+
+ if (vblank_time < switch_limit)
+ return true;
+ else
+ return false;
+
+}
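+/*
+ * The 450/300 switch_limit above is compared against
+ * r600_dpm_get_vblank_time(), so both are presumably in microseconds;
+ * GDDR5 gets the longer window, most likely because an mclk change takes
+ * longer to settle there.
+ */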
+
+static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching;
+ u32 sclk, mclk;
+ int i;
+
+ if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ ci_dpm_vblank_too_short(rdev))
+ disable_mclk_switching = true;
+ else
+ disable_mclk_switching = false;
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (rdev->pm.dpm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ }
+
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+
+ if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+ ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+ if (disable_mclk_switching) {
+ if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+ ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+ } else {
+ if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+ ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+ }
+}
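+/*
+ * Net effect of the rules above: with more than one active crtc, or a
+ * vblank too short to hide an mclk change, mclk switching is effectively
+ * disabled by forcing the lowest performance level up to the highest
+ * level's mclk; the trailing fixups keep sclk/mclk monotonic between
+ * levels 0 and 1.
+ */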
+
+static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT);
+ tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
+ tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
+ CI_DIG_THERM_INTL(low_temp / 1000);
+ WREG32_SMC(CG_THERMAL_INT, tmp);
+
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DIG_THERM_DPM_MASK;
+ tmp |= DIG_THERM_DPM(high_temp / 1000);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ return 0;
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_read_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_write_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ table->FpsHighT = cpu_to_be16(tmp);
+
+ tmp = 30;
+ table->FpsLowT = cpu_to_be16(tmp);
+ }
+}
+
+static int ci_update_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret = 0;
+ u32 low_sclk_interrupt_t = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+
+ }
+
+ return ret;
+}
+
+static void ci_get_leakage_voltages(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 leakage_id, virtual_voltage_id;
+ u16 vddc, vddci;
+ int i;
+
+ pi->vddc_leakage.count = 0;
+ pi->vddci_leakage.count = 0;
+
+ if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
+ virtual_voltage_id,
+ leakage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+ pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+ pi->vddci_leakage.count++;
+ }
+ }
+ }
+ }
+}
+
+static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool want_thermal_protection;
+ enum radeon_dpm_event_src dpm_event_src;
+ u32 tmp;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DPM_EVENT_SRC_MASK;
+ tmp |= DPM_EVENT_SRC(dpm_event_src);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ if (pi->thermal_protection)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ } else {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
+ enum radeon_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
+static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ pi->need_update_smu7_dpm_table = 0;
+ return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (enable) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x05);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x05);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
+
+ udelay(10);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
+ }
+ } else {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ci_start_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+ WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, true);
+ if (ret)
+ return ret;
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_stop_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp &= ~DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, false);
+ if (ret)
+ return ret;
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ if (enable)
+ tmp &= ~SCLK_PWRMGT_OFF;
+ else
+ tmp |= SCLK_PWRMGT_OFF;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
+ bool ac_power)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 power_limit;
+
+ if (ac_power)
+ power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+ else
+ power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+ ci_set_power_limit(rdev, power_limit);
+
+ if (pi->caps_automatic_dc_transition) {
+ if (ac_power)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
+ else
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
+ }
+
+ return 0;
+}
+#endif
+
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(rdev, msg);
+}
+
+static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 *parameter)
+{
+ PPSMC_Result smc_result;
+
+ smc_result = ci_send_msg_to_smc(rdev, msg);
+
+ if ((smc_result == PPSMC_Result_OK) && parameter)
+ *parameter = RREG32(SMC_MSG_ARG_0);
+
+ return smc_result;
+}
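+/*
+ * The two wrappers above capture the SMC mailbox convention used throughout
+ * this file: an argument is staged in SMC_MSG_ARG_0 before the message is
+ * sent, and any reply is read back from the same register on success.
+ */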
+
+static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->sclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->mclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp)
+{
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ return 0;
+}
+
+static int ci_set_boot_state(struct radeon_device *rdev)
+{
+ return ci_enable_sclk_mclk_dpm(rdev, false);
+}
+
+static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
+{
+ u32 sclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetSclkFrequency,
+ &sclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ sclk_freq = 0;
+
+ return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
+{
+ u32 mclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetMclkFrequency,
+ &mclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ mclk_freq = 0;
+
+ return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct radeon_device *rdev)
+{
+ int i;
+
+ ci_program_jump_on_start(rdev);
+ ci_start_smc_clock(rdev);
+ ci_start_smc(rdev);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
+ break;
+ }
+}
+
+static void ci_dpm_stop_smc(struct radeon_device *rdev)
+{
+ ci_reset_smc(rdev);
+ ci_stop_smc_clock(rdev);
+}
+
+static int ci_process_firmware_header(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->dpm_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->soft_regs_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->mc_reg_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->fan_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->arb_table_start = tmp;
+
+ return 0;
+}
+
+static void ci_read_clock_registers(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->clock_registers.cg_spll_func_cntl =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL);
+ pi->clock_registers.cg_spll_func_cntl_2 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
+ pi->clock_registers.cg_spll_func_cntl_3 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
+ pi->clock_registers.cg_spll_func_cntl_4 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
+ pi->clock_registers.cg_spll_spread_spectrum =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ pi->clock_registers.cg_spll_spread_spectrum_2 =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
+ pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+ pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+ pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+ pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+ pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+ pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+ pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ if (enable)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= STATIC_PM_EN;
+
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct radeon_device *rdev)
+{
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+ udelay(25000);
+
+ return 0;
+}
+
+static int ci_exit_ulp_state(struct radeon_device *rdev)
+{
+ int i;
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+ udelay(7000);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(SMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct radeon_device *rdev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct radeon_device *rdev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ } else {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_program_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+ u32 pre_vbi_time_in_us;
+ u32 frame_time_in_us;
+ u32 ref_clock = rdev->clock.spll.reference_freq;
+ u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+
+ tmp &= ~DISP_GAP_MASK;
+ if (rdev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+
+ if (refresh_rate == 0)
+ refresh_rate = 60;
+ if (vblank_time == 0xffffffff)
+ vblank_time = 500;
+ frame_time_in_us = 1000000 / refresh_rate;
+ pre_vbi_time_in_us =
+ frame_time_in_us - 200 - vblank_time;
+ tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
+}
+
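+/* Dynamic spread spectrum is only enabled when the board supports sclk
+ * SS; the disable path also clears the static SSEN bit.
+ */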
+static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (enable) {
+ if (pi->caps_sclk_ss_support) {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+ } else {
+ tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ tmp &= ~SSEN;
+ WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_program_sstp(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void ci_enable_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+ tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
+ WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
+ WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
+ WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
+ WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
+ WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
+ WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
+ WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, 0);
+ WREG32_SMC(CG_FTV_1, 0);
+ WREG32_SMC(CG_FTV_2, 0);
+ WREG32_SMC(CG_FTV_3, 0);
+ WREG32_SMC(CG_FTV_4, 0);
+ WREG32_SMC(CG_FTV_5, 0);
+ WREG32_SMC(CG_FTV_6, 0);
+ WREG32_SMC(CG_FTV_7, 0);
+}
+
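+/*
+ * Wait for the SMC boot sequence to complete, then stop the SMC clock
+ * and hold the SMC in reset before loading the dpm microcode into SMC
+ * SRAM.
+ */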
+static int ci_upload_firmware(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int i, ret;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
+ break;
+ }
+ WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
+
+ ci_stop_smc_clock(rdev);
+ ci_reset_smc(rdev);
+
+ ret = ci_load_smc_ucode(rdev, pi->sram_end);
+
+	return ret;
+}
+
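+/* SVI2 has no GPIO lookup table, so the voltage table is derived
+ * directly from the clock/voltage dependency table entries.
+ */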
+static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
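+/*
+ * Fetch (GPIO) or derive (SVI2) the VDDC/VDDCI/MVDD voltage tables and
+ * trim each one down to its SMU7 level-count limit.
+ */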
+static int ci_construct_voltage_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
+ &pi->vddc_voltage_table);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
+ &pi->vddci_voltage_table);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
+ &pi->mvdd_voltage_table);
+
+ return 0;
+}
+
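+/* Convert one voltage table entry to SMC format: values are scaled by
+ * VOLTAGE_SCALE and byte-swapped to the SMC's big-endian layout.
+ */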
+static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+ int ret;
+
+ ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
+ &smc_voltage_table->StdVoltageHiSidd,
+ &smc_voltage_table->StdVoltageLoSidd);
+
+ if (ret) {
+ smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+ smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+ smc_voltage_table->StdVoltageHiSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+ smc_voltage_table->StdVoltageLoSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->VddcLevelCount = pi->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddc_voltage_table.entries[count],
+ &table->VddcLevel[count]);
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddcLevel[count].Smio |=
+ pi->vddc_voltage_table.entries[count].smio_low;
+ else
+ table->VddcLevel[count].Smio = 0;
+ }
+ table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ table->VddciLevelCount = pi->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddci_voltage_table.entries[count],
+ &table->VddciLevel[count]);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddciLevel[count].Smio |=
+ pi->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio = 0;
+ }
+ table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->MvddLevelCount = pi->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->mvdd_voltage_table.entries[count],
+ &table->MvddLevel[count]);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->MvddLevel[count].Smio |=
+ pi->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio = 0;
+ }
+ table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ int ret;
+
+ ret = ci_populate_smc_vddc_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vddci_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_mvdd_table(rdev, table);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
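+/* Pick the mvdd voltage for the lowest dependency entry whose clock is
+ * at or above the requested mclk.
+ */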
+static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i = 0;
+
+ if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+ if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+ voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	/* no mvdd control: the caller falls back to a zero mvdd value */
+	return -EINVAL;
+}
+
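+/*
+ * Look up the standardized hi/lo SIDD voltages in the CAC leakage
+ * table, first by exact VDDC match and then by the first entry at or
+ * above the requested voltage; the scaled raw value is the fallback.
+ */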
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+ u16 v_index, idx;
+ bool voltage_found = false;
+ *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+ *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value ==
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value <=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 sclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (sclk < limits->entries[i].sclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 mclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (mclk < limits->entries[i].mclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static int ci_init_arb_table_index(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
+ tmp, pi->sram_end);
+}
+
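+/* Return the voltage of the first dependency entry whose clock is at
+ * or above the requested clock, clamping to the highest entry.
+ */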
+static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ u32 clock, u32 *voltage)
+{
+ u32 i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *voltage = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+ return 0;
+}
+
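+/* Pick the largest power-of-two deep-sleep divider that keeps the
+ * divided sclk at or above the minimum engine clock.
+ */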
+static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ u32 i;
+ u32 tmp;
+ u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ tmp = sclk / (1 << i);
+ if (tmp >= min || i == 0)
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+ return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct radeon_device *rdev)
+{
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
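+/* Have the atom tables program the MC arbiter for this sclk/mclk pair,
+ * then read the resulting timing registers back into the SMC arb entry.
+ */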
+static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
+ u32 sclk,
+ u32 mclk,
+ SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
+
+ dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+ arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
+ arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+ arb_regs->McArbBurstTime = (u8)burst_time;
+
+ return 0;
+}
+
+static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
+ u32 i, j;
+ int ret = 0;
+
+ memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+ ret = ci_populate_memory_timing_parameters(rdev,
+ pi->dpm_table.sclk_table.dpm_levels[i].value,
+ pi->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret == 0)
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->arb_table_start,
+ (u8 *)&arb_regs,
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
+ pi->sram_end);
+
+ return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->need_update_smu7_dpm_table == 0)
+ return 0;
+
+ return ci_do_program_memory_timing_parameters(rdev);
+}
+
+static void ci_populate_smc_initial_state(struct radeon_device *rdev,
+ struct radeon_ps *radeon_boot_state)
+{
+ struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 level = 0;
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+ boot_state->performance_levels[0].sclk) {
+ pi->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+ boot_state->performance_levels[0].mclk) {
+ pi->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+}
+
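+/* Build a bitmask with one bit per dpm level (bit 0 = level 0), set
+ * for each enabled level.
+ */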
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+ u32 i;
+ u32 mask_value = 0;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask_value = mask_value << 1;
+ if (dpm_table->dpm_levels[i-1].enabled)
+ mask_value |= 0x1;
+ else
+ mask_value &= 0xFFFFFFFE;
+ }
+
+ return mask_value;
+}
+
+static void ci_populate_smc_link_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 i;
+
+ for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = cpu_to_be32(5);
+ table->LinkLevel[i].UpT = cpu_to_be32(30);
+ }
+
+ pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
+static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->UvdLevelCount =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].VclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].DclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+ table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+ table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+ table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_vce_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->VceLevelCount =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->VceLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+ table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+ }
+
+	return ret;
+}
+
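+/* Note: unlike the VCE/SAMU levels, MinVoltage is not multiplied by
+ * VOLTAGE_SCALE here; whether that is intentional for ACP is not
+ * obvious from this code.
+ */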
+static int ci_populate_smc_acp_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->AcpLevelCount = (u8)
+ (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+ table->AcpLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->AcpLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+ table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_samu_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->SamuLevelCount =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+ table->SamuLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->SamuLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+ table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
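+/*
+ * Translate a memory clock into MPLL register values via the atom
+ * divider tables, including optional memory spread spectrum. GDDR5
+ * runs the nominal SS frequency at 4x the memory clock, other memory
+ * types at 2x.
+ */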
+static int ci_calculate_mclk_params(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~BWCTRL_MASK;
+ mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+ mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+ mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+ CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+ mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+ if (pi->mem_gddr5) {
+ mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+ YCLK_POST_DIV(mpll_param.post_div);
+ }
+
+ if (pi->caps_mclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+
+ if (pi->mem_gddr5)
+ freq_nom = memory_clock * 4;
+ else
+ freq_nom = memory_clock * 2;
+
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
+
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+ else
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
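+/*
+ * Fill in one SMC memory level: minimum voltages from the mclk
+ * dependency tables, stutter/strobe/EDC decisions from the per-chip
+ * thresholds, then the MPLL parameters; multi-byte fields are finally
+ * converted to the SMC's big-endian layout.
+ */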
+static int ci_populate_single_memory_level(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *memory_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+ bool dll_state_on;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddc);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddci);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ memory_clock, &memory_level->MinMvdd);
+ if (ret)
+ return ret;
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_mclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ memory_clock,
+ &memory_level->MinVddcPhases);
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
+ memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+ memory_level->StutterEnable = false;
+ memory_level->StrobeEnable = false;
+ memory_level->EdcReadEnable = false;
+ memory_level->EdcWriteEnable = false;
+ memory_level->RttEnable = false;
+
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	if (pi->mclk_stutter_mode_threshold &&
+	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+	    !pi->uvd_enabled &&
+	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+	    (rdev->pm.dpm.new_active_crtc_count <= 2))
+		memory_level->StutterEnable = true;
+
+ if (pi->mclk_strobe_mode_threshold &&
+ (memory_clock <= pi->mclk_strobe_mode_threshold))
+		memory_level->StrobeEnable = true;
+
+ if (pi->mem_gddr5) {
+ memory_level->StrobeRatio =
+ si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+ if (pi->mclk_edc_enable_threshold &&
+ (memory_clock > pi->mclk_edc_enable_threshold))
+ memory_level->EdcReadEnable = true;
+
+ if (pi->mclk_edc_wr_enable_threshold &&
+ (memory_clock > pi->mclk_edc_wr_enable_threshold))
+ memory_level->EdcWriteEnable = true;
+
+ if (memory_level->StrobeEnable) {
+ if (si_get_mclk_frequency_ratio(memory_clock, true) >=
+ ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ else
+ dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+ } else {
+ dll_state_on = pi->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ }
+
+ ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+ if (ret)
+ return ret;
+
+ memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+ memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+ memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+ memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+ memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+ memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+ memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+ memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+ memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+ memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+ memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+ memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+ memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+ return 0;
+}
+
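+/*
+ * Build the ACPI (lowest power) level: the SPLL is powered down and
+ * held in reset with the sclk mux pointed away from the SPLL output,
+ * and the memory DLLs are reset and power gated.
+ */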
+static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ SMU7_Discrete_VoltageLevel voltage_level;
+ u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ int ret;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc)
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+ table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ table->ACPILevel.SclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl &= ~SPLL_PWRON;
+ spll_func_cntl |= SPLL_RESET;
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+ table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+ table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+ table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+ table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+ table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+ table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+ table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+ table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+ table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+ table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+ table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+ table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ if (pi->acpi_vddci)
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+ }
+
+ if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd = 0;
+ else
+ table->MemoryACPILevel.MinMvdd =
+ cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+ mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+ table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+ table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+ table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ cpu_to_be16((u16)pi->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ table->MemoryACPILevel.StrobeEnable = false;
+ table->MemoryACPILevel.EdcReadEnable = false;
+ table->MemoryACPILevel.EdcWriteEnable = false;
+ table->MemoryACPILevel.RttEnable = false;
+
+ return 0;
+}
+
+static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+
+ if (ulv->supported) {
+ if (enable)
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+
+ return 0;
+}
+
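+/* ULV is expressed as a VDDC offset below the lowest sclk voltage (the
+ * target voltage apparently being stashed in backbias_response_time);
+ * a zero target voltage disables ULV support entirely.
+ */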
+static int ci_populate_ulv_level(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ if (ulv_voltage == 0) {
+ pi->ulv.supported = false;
+ return 0;
+ }
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffset = 0;
+ else
+ state->VddcOffset =
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+ } else {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffsetVid = 0;
+ else
+ state->VddcOffsetVid = (u8)
+ ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+ state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+ state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+ state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+ return 0;
+}
+
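+/*
+ * Translate an engine clock into SPLL register values: feedback
+ * divider with dithering enabled, plus CLK_S/CLK_V spread spectrum
+ * settings when engine SS is supported.
+ */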
+static int ci_calculate_sclk_params(struct radeon_device *rdev,
+ u32 engine_clock,
+ SMU7_Discrete_GraphicsLevel *sclk)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+ fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
+
+ if (pi->caps_sclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~CLK_S_MASK;
+ cg_spll_spread_spectrum |= CLK_S(clk_s);
+ cg_spll_spread_spectrum |= SSEN;
+
+ cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (u8)dividers.post_divider;
+
+ return 0;
+}
+
+static int ci_populate_single_graphic_level(struct radeon_device *rdev,
+ u32 engine_clock,
+ u16 sclk_activity_level_t,
+ SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
+ if (ret)
+ return ret;
+
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ engine_clock, &graphic_level->MinVddc);
+ if (ret)
+ return ret;
+
+ graphic_level->SclkFrequency = engine_clock;
+
+ graphic_level->Flags = 0;
+ graphic_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ graphic_level->ActivityLevel = sclk_activity_level_t;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ graphic_level->EnabledForActivity = 1;
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpH = 0;
+ graphic_level->DownH = 0;
+ graphic_level->VoltageDownH = 0;
+ graphic_level->PowerThrottle = 0;
+
+ if (pi->caps_sclk_ds)
+ graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
+ engine_clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+ graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+ graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+ graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+ graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+ graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+ graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+ graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+ graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+
+ return 0;
+}
+
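+/* Populate every graphics level from the sclk dpm table and copy the
+ * whole array into SMC SRAM; only the top level gets the high display
+ * watermark.
+ */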
+static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ ret = ci_populate_single_graphic_level(rdev,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (u16)pi->activity_target[i],
+ &pi->smc_state_table.GraphicsLevel[i]);
+ if (ret)
+ return ret;
+ if (i == (dpm_table->sclk_table.count - 1))
+ pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_ulv_state(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *ulv_level)
+{
+ return ci_populate_ulv_level(rdev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+ SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+ return -EINVAL;
+ ret = ci_populate_single_memory_level(rdev,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &pi->smc_state_table.MemoryLevel[i]);
+ if (ret)
+ return ret;
+ }
+
+ pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+ pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ci_reset_single_dpm_table(struct radeon_device *rdev,
+ struct ci_single_dpm_table* dpm_table,
+ u32 count)
+{
+ u32 i;
+
+ dpm_table->count = count;
+ for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+ dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
+ u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+ dpm_table->dpm_levels[index].value = pcie_gen;
+ dpm_table->dpm_levels[index].param1 = pcie_lanes;
+ dpm_table->dpm_levels[index].enabled = true;
+}
+
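+/*
+ * Build the fixed six-entry PCIe table from the powersaving and
+ * performance ranges, ordered from minimum gen/lanes up to maximum.
+ * If only one set of levels exists it is mirrored into the other.
+ */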
+static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+ return -EINVAL;
+
+ if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+ pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+ } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+ pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+ }
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.pcie_speed_table,
+ SMU7_MAX_LEVELS_LINK);
+
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+ pi->pcie_gen_powersaving.max,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+ pi->pcie_gen_performance.max,
+ pi->pcie_lane_performance.max);
+
+ pi->dpm_table.pcie_speed_table.count = 6;
+
+ return 0;
+}
+
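+/*
+ * Seed the dpm tables from the atom dependency tables, collapsing
+ * consecutive duplicate clocks so every sclk/mclk level is unique.
+ */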
+static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_cac_leakage_table *std_voltage_table =
+ &rdev->pm.dpm.dyn_state.cac_leakage_table;
+ u32 i;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_table->count < 1)
+ return -EINVAL;
+
+ memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.sclk_table,
+ SMU7_MAX_LEVELS_GRAPHICS);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mclk_table,
+ SMU7_MAX_LEVELS_MEMORY);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddc_table,
+ SMU7_MAX_LEVELS_VDDC);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddci_table,
+ SMU7_MAX_LEVELS_VDDCI);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mvdd_table,
+ SMU7_MAX_LEVELS_MVDD);
+
+ pi->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ if ((i == 0) ||
+ (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+ allowed_sclk_vddc_table->entries[i].clk)) {
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+ allowed_sclk_vddc_table->entries[i].clk;
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+ pi->dpm_table.sclk_table.count++;
+ }
+ }
+
+ pi->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+		if ((i == 0) ||
+ (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+ allowed_mclk_table->entries[i].clk)) {
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+ allowed_mclk_table->entries[i].clk;
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+ pi->dpm_table.mclk_table.count++;
+ }
+ }
+
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ pi->dpm_table.vddc_table.dpm_levels[i].value =
+ allowed_sclk_vddc_table->entries[i].v;
+ pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+ std_voltage_table->entries[i].leakage;
+ pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.vddci_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+ }
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.mvdd_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+ }
+
+ ci_setup_default_pcie_tables(rdev);
+
+ return 0;
+}
+
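+/* Find the dpm level whose clock exactly matches the VBIOS boot value;
+ * on multiple matches the last one wins.
+ */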
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+ u32 value, u32 *boot_level)
+{
+ u32 i;
+ int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+ if (value == table->dpm_levels[i].value) {
+ *boot_level = i;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
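+/*
+ * Master SMC table init: build the default dpm tables, populate every
+ * level type (graphics, memory, link, ACPI, UVD/VCE/ACP/SAMU), fill in
+ * the boot state and global control fields, then upload the result to
+ * SMC SRAM. Failed boot-level lookups leave the level at 0, and the
+ * upload stops short of the three trailing PID controller blocks,
+ * which presumably remain SMC-owned.
+ */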
+static int ci_init_smc_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+ struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+ int ret;
+
+ ret = ci_setup_default_dpm_tables(rdev);
+ if (ret)
+ return ret;
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+ ci_populate_smc_voltage_tables(rdev, table);
+
+ ci_init_fps_limits(rdev);
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (pi->mem_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (ulv->supported) {
+ ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
+ if (ret)
+ return ret;
+ WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ }
+
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+
+ ci_populate_smc_link_level(rdev, table);
+
+ ret = ci_populate_smc_acpi_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vce_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_acp_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_samu_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_do_program_memory_timing_parameters(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_uvd_level(rdev, table);
+ if (ret)
+ return ret;
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+ pi->vbios_boot_state.sclk_bootup_value,
+ (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+ ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+ pi->vbios_boot_state.mclk_bootup_value,
+ (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+ table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+ table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+ table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+ ci_populate_smc_initial_state(rdev, radeon_boot_state);
+
+ ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
+ if (ret)
+ return ret;
+
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ table->SystemFlags = cpu_to_be32(table->SystemFlags);
+ table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+ table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+ table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+ table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+ table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+ table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+ table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+ table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+ table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+ table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+ table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (u8 *)&table->SystemFlags,
+ sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ci_trim_single_dpm_states(struct radeon_device *rdev,
+ struct ci_single_dpm_table *dpm_table,
+ u32 low_limit, u32 high_limit)
+{
+ u32 i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit) ||
+ (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+}
+
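+/* Disable PCIe levels outside the requested speed/lane window, then
+ * drop duplicates so each enabled gen/lane pair appears only once.
+ */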
+static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
+ u32 speed_low, u32 lanes_low,
+ u32 speed_high, u32 lanes_high)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+ u32 i, j;
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if ((pcie_table->dpm_levels[i].value < speed_low) ||
+ (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+ (pcie_table->dpm_levels[i].value > speed_high) ||
+ (pcie_table->dpm_levels[i].param1 > lanes_high))
+ pcie_table->dpm_levels[i].enabled = false;
+ else
+ pcie_table->dpm_levels[i].enabled = true;
+ }
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_table->dpm_levels[i].enabled) {
+ for (j = i + 1; j < pcie_table->count; j++) {
+ if (pcie_table->dpm_levels[j].enabled) {
+ if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+ (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+ pcie_table->dpm_levels[j].enabled = false;
+ }
+ }
+ }
+ }
+}
+
+static int ci_trim_dpm_states(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 high_limit_count;
+
+ if (state->performance_level_count < 1)
+ return -EINVAL;
+
+ if (state->performance_level_count == 1)
+ high_limit_count = 0;
+ else
+ high_limit_count = 1;
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.sclk_table,
+ state->performance_levels[0].sclk,
+ state->performance_levels[high_limit_count].sclk);
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.mclk_table,
+ state->performance_levels[0].mclk,
+ state->performance_levels[high_limit_count].mclk);
+
+ ci_trim_pcie_dpm_states(rdev,
+ state->performance_levels[0].pcie_gen,
+ state->performance_levels[0].pcie_lane,
+ state->performance_levels[high_limit_count].pcie_gen,
+ state->performance_levels[high_limit_count].pcie_lane);
+
+ return 0;
+}
+
+static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
+{
+ struct radeon_clock_voltage_dependency_table *disp_voltage_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+ struct radeon_clock_voltage_dependency_table *vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 requested_voltage = 0;
+ u32 i;
+
+ if (disp_voltage_table == NULL)
+ return -EINVAL;
+ if (!disp_voltage_table->count)
+ return -EINVAL;
+
+ for (i = 0; i < disp_voltage_table->count; i++) {
+ if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+ requested_voltage = disp_voltage_table->entries[i].v;
+ }
+
+ for (i = 0; i < vddc_table->count; i++) {
+ if (requested_voltage <= vddc_table->entries[i].v) {
+ requested_voltage = vddc_table->entries[i].v;
+ return (ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VddC_Request,
+ requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result result;
+
+ if (!pi->sclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->pcie_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_PCIeDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ ci_apply_disp_minimum_voltage_request(rdev);
+
+ return 0;
+}
+
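+/*
+ * Work out which SMU7 tables need rebuilding: a target sclk/mclk that
+ * is missing from the current table means an overdrive-style update,
+ * and a change in active CRTC count forces the memory levels to be
+ * repopulated.
+ */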
+static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ u32 i;
+
+ pi->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else {
+ /* XXX check display min clock requirements */
+ if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ if (rdev->pm.dpm.current_active_crtc_count !=
+ rdev->pm.dpm.new_active_crtc_count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
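+/*
+ * Patch the top sclk/mclk DPM level for overdrive changes and
+ * re-upload the affected graphics/memory level tables to the SMC.
+ */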
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ int ret;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+ dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+ dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
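+/*
+ * Build the UVD DPM level mask from the voltage dependency table (only
+ * the highest valid level if UVD DPM is not supported) and force the
+ * memory clock off its lowest DPM level while UVD is active; the
+ * lowest level is restored on disable.
+ */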
+static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+ for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_uvd_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = true;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ } else {
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = false;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_vce_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_samu_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+ }
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_acp_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ if (pi->caps_uvd_dpm ||
+ (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+ pi->smc_state_table.UvdBootLevel = 0;
+ else
+ pi->smc_state_table.UvdBootLevel =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~UvdBootLevel_MASK;
+ tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ u32 min_evclk = 30000; /* ??? */
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= min_evclk)
+ return i;
+ }
+
+ return table->count - 1;
+}
+
+static int ci_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
+ bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
+ int ret = 0;
+ u32 tmp;
+
+ if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
+ if (new_vce_clock_non_zero) {
+ pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~VceBootLevel_MASK;
+ tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+
+ ret = ci_enable_vce_dpm(rdev, true);
+ } else {
+ ret = ci_enable_vce_dpm(rdev, false);
+ }
+ }
+ return ret;
+}
+
+static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ return ci_enable_samu_dpm(rdev, gate);
+}
+
+static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ pi->smc_state_table.AcpBootLevel = 0;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~AcpBootLevel_MASK;
+ tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_acp_dpm(rdev, !gate);
+}
+#endif
+
+static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_trim_dpm_states(rdev, radeon_state);
+ if (ret)
+ return ret;
+
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+ pi->last_mclk_dpm_enable_mask =
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ if (pi->uvd_enabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ }
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
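+/* The caller must guarantee that level_mask is non-zero. */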
+static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
+ u32 level_mask)
+{
+ u32 level = 0;
+
+ while ((level_mask & (1 << level)) == 0)
+ level++;
+
+ return level;
+}
+
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ u32 tmp, levels, i;
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+				ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
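+/*
+ * Append the MC_PMG_CMD_* entries that are derived from MC_SEQ_MISC1
+ * and MC_SEQ_RESERVE_M to the end of the MC register table.
+ */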
+static int ci_set_mc_special_registers(struct radeon_device *rdev,
+ struct ci_mc_reg_table *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 i, j, k;
+ u32 temp_reg;
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+		switch (table->mc_reg_address[i].s1 << 2) {
+ case MC_SEQ_MISC1:
+ temp_reg = RREG32(MC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(MC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (!pi->mem_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!pi->mem_gddr5) {
+ table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
+ table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case MC_SEQ_RESERVE_M:
+ temp_reg = RREG32(MC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
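+/*
+ * Map an MC sequencer register to its _LP shadow copy; returns false
+ * if the register has no shadow.
+ */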
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+
+	switch (in_reg) {
+ case MC_SEQ_RAS_TIMING >> 2:
+ *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_DLL_STBY >> 2:
+ *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD0 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD1 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CTRL >> 2:
+ *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
+ break;
+ case MC_SEQ_CAS_TIMING >> 2:
+ *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING2 >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CMD >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CTL >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+ break;
+ case MC_PMG_CMD_EMRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS1 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ break;
+ case MC_SEQ_PMG_TIMING >> 2:
+ *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS2 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_2 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
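+/*
+ * Only registers whose value actually varies across the table entries
+ * are flagged and uploaded to the SMC.
+ */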
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+ u8 i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+ }
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+ struct ci_mc_reg_table *ci_table)
+{
+ u8 i, j;
+
+ if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ci_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ci_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++)
+ ci_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ ci_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
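+/*
+ * Seed the _LP shadow registers with the live MC values, then build
+ * the driver copy of the vbios MC register table and derive the
+ * special registers from it.
+ */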
+static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_mc_reg_table *table;
+ struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+ u8 module_index = rv770_get_memory_module_index(rdev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+ WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+ WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
+ WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
+ WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
+ WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
+ WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
+ WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
+ WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+ WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+ WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+ WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+ WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+ WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+ WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+ WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+ WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+ ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_s0_mc_reg_index(ci_table);
+
+ ret = ci_set_mc_special_registers(rdev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_valid_flag(ci_table);
+
+init_mc_done:
+ kfree(table);
+
+ return ret;
+}
+
+static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i, j;
+
+ for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+ if (pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (u8)i;
+
+ return 0;
+}
+
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
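+/*
+ * Pick the first table entry whose mclk_max covers memory_clock,
+ * clamping to the last entry.
+ */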
+static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+ const u32 memory_clock,
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+	u32 i;
+
+	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
+ if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, pi->mc_reg_table.last,
+ pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+ ci_convert_mc_reg_table_entry_to_smc(rdev,
+ pi->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
+ if (ret)
+ return ret;
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start,
+ (u8 *)&pi->smc_mc_reg_table,
+ sizeof(SMU7_Discrete_MCRegisters),
+ pi->sram_end);
+}
+
+static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start +
+ offsetof(SMU7_Discrete_MCRegisters, data[0]),
+ (u8 *)&pi->smc_mc_reg_table.data[0],
+ sizeof(SMU7_Discrete_MCRegisterSet) *
+ pi->dpm_table.mclk_table.count,
+ pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= VOLT_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+
+ return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
+{
+ u32 speed_cntl = 0;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+ return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
+{
+ u32 link_width = 0;
+
+ link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
+ link_width >>= LC_LINK_WIDTH_RD_SHIFT;
+
+ switch (link_width) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
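+/*
+ * ACPI PSPP: request a faster PCIe link before switching to a faster
+ * state; downgrades are deferred until after the state change.
+ */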
+static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ enum radeon_pcie_gen current_link_speed;
+
+ if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
+ current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
+ else
+ current_link_speed = pi->force_pcie_gen;
+
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+ pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+ case RADEON_PCIE_GEN3:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ pi->force_pcie_gen = RADEON_PCIE_GEN2;
+ if (current_link_speed == RADEON_PCIE_GEN2)
+ break;
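+			/* fall through */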
+ case RADEON_PCIE_GEN2:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
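+			/* fall through */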
+#endif
+ default:
+ pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ pi->pspp_notify_required = true;
+ }
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ u8 request;
+
+ if (pi->pspp_notify_required) {
+ if (target_link_speed == RADEON_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == RADEON_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (ci_get_current_pcie_speed(rdev) > 0))
+ return;
+
+#ifdef CONFIG_ACPI
+ radeon_acpi_pcie_performance_request(rdev, request, false);
+#endif
+ }
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table->count < 1)
+ return -EINVAL;
+
+ pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+ pi->max_vddc_in_pp_table =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+ pi->max_vddci_in_pp_table =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ return 0;
+}
+
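+/* Replace a leakage voltage ID with the measured voltage it stands for. */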
+static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddc) {
+ *vddc = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddci) {
+ *vddci = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_vce_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_uvd_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_phase_shedding_limits_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
+ }
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ if (table) {
+ ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
+ ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
+ }
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_cac_leakage_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
+ }
+}
+
+static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
+{
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+ ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+ ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+ ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+ ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.cac_leakage_table);
+}
+
+static void ci_get_memory_type(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+
+ if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+ MC_SEQ_MISC0_GDDR5_VALUE)
+ pi->mem_gddr5 = true;
+ else
+ pi->mem_gddr5 = false;
+}
+
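+/*
+ * Cache a copy of the state and repoint ps_priv at the private copy
+ * so later lookups stay valid.
+ */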
+void ci_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+void ci_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ ci_update_requested_ps(rdev, new_ps);
+
+ ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
+
+ return 0;
+}
+
+void ci_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ ci_update_current_ps(rdev, new_ps);
+}
+
+void ci_dpm_setup_asic(struct radeon_device *rdev)
+{
+ ci_read_clock_registers(rdev);
+ ci_get_memory_type(rdev);
+ ci_enable_acpi_power_management(rdev);
+ ci_init_sclk_t(rdev);
+}
+
+int ci_dpm_enable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (ci_is_smc_running(rdev))
+ return -EINVAL;
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ ci_enable_voltage_control(rdev);
+ ret = ci_construct_voltage_tables(rdev);
+ if (ret) {
+ DRM_ERROR("ci_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_initialize_mc_reg_table(rdev);
+ if (ret)
+ pi->caps_dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ ci_enable_spread_spectrum(rdev, true);
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, true);
+ ci_program_sstp(rdev);
+ ci_enable_display_gap(rdev);
+ ci_program_vc(rdev);
+ ret = ci_upload_firmware(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_firmware failed\n");
+ return ret;
+ }
+ ret = ci_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("ci_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
+ if (ret) {
+ DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = ci_init_smc_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_smc_table failed\n");
+ return ret;
+ }
+ ret = ci_init_arb_table_index(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_populate_initial_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_populate_pm_base(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_pm_base failed\n");
+ return ret;
+ }
+ ci_dpm_start_smc(rdev);
+ ci_enable_vr_hot_gpio_interrupt(rdev);
+ ret = ci_notify_smc_display_change(rdev, false);
+ if (ret) {
+ DRM_ERROR("ci_notify_smc_display_change failed\n");
+ return ret;
+ }
+ ci_enable_sclk_control(rdev, true);
+ ret = ci_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ulv failed\n");
+ return ret;
+ }
+ ret = ci_enable_ds_master_switch(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ds_master_switch failed\n");
+ return ret;
+ }
+ ret = ci_start_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_start_dpm failed\n");
+ return ret;
+ }
+ ret = ci_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_didt failed\n");
+ return ret;
+ }
+ ret = ci_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = ci_enable_power_containment(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_power_containment failed\n");
+ return ret;
+ }
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+#if 0
+ PPSMC_Result result;
+#endif
+ ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("ci_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+#if 0
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+
+ if (result != PPSMC_Result_OK)
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+#endif
+ }
+
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ci_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ ci_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+void ci_dpm_disable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_dpm_powergate_uvd(rdev, false);
+
+ if (!ci_is_smc_running(rdev))
+ return;
+
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, false);
+ ci_enable_power_containment(rdev, false);
+ ci_enable_smc_cac(rdev, false);
+ ci_enable_didt(rdev, false);
+ ci_enable_spread_spectrum(rdev, false);
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ ci_stop_dpm(rdev);
+	ci_enable_ds_master_switch(rdev, false);
+ ci_enable_ulv(rdev, false);
+ ci_clear_vc(rdev);
+ ci_reset_to_default(rdev);
+ ci_dpm_stop_smc(rdev);
+ ci_force_switch_to_arb_f0(rdev);
+
+ ci_update_current_ps(rdev, boot_ps);
+}
+
+int ci_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ struct radeon_ps *old_ps = &pi->current_rps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
+ if (pi->pcie_performance_request)
+ ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+ ret = ci_freeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+ return ret;
+ }
+ ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+#if 0
+ ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("ci_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ ret = ci_update_sclk_t(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_sclk_t failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_update_and_upload_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_program_memory_timing_parameters(rdev);
+ if (ret) {
+ DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ ret = ci_unfreeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_upload_dpm_level_enable_mask(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+ if (pi->pcie_performance_request)
+ ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+ ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+ if (ret) {
+ DRM_ERROR("ci_dpm_force_performance_level failed\n");
+ return ret;
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ return 0;
+}
+
+int ci_dpm_power_control_set_level(struct radeon_device *rdev)
+{
+ return ci_power_control_set_level(rdev);
+}
+
+void ci_dpm_reset_asic(struct radeon_device *rdev)
+{
+ ci_set_boot_state(rdev);
+}
+
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+ ci_program_display_gap(rdev);
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ rdev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl = &ps->performance_levels[index];
+
+ ps->performance_level_count = index + 1;
+
+ pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+ pl->pcie_gen = r600_get_pcie_gen_support(rdev,
+ pi->sys_pcie_mask,
+ pi->vbios_boot_state.pcie_gen_bootup_value,
+ clock_info->ci.ucPCIEGen);
+ pl->pcie_lane = r600_get_pcie_lane_support(rdev,
+ pi->vbios_boot_state.pcie_lane_bootup_value,
+ le16_to_cpu(clock_info->ci.usPCIELane));
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+ pi->ulv.supported = true;
+ pi->ulv.pl = *pl;
+ pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+ }
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+ pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+ pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+ pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+ }
+
+ switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ pi->use_pcie_powersaving_levels = true;
+ if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+ pi->pcie_gen_powersaving.max = pl->pcie_gen;
+ if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+ pi->pcie_gen_powersaving.min = pl->pcie_gen;
+ if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+ pi->pcie_lane_powersaving.max = pl->pcie_lane;
+ if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+ pi->pcie_lane_powersaving.min = pl->pcie_lane;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ pi->use_pcie_performance_levels = true;
+ if (pi->pcie_gen_performance.max < pl->pcie_gen)
+ pi->pcie_gen_performance.max = pl->pcie_gen;
+ if (pi->pcie_gen_performance.min > pl->pcie_gen)
+ pi->pcie_gen_performance.min = pl->pcie_gen;
+ if (pi->pcie_lane_performance.max < pl->pcie_lane)
+ pi->pcie_lane_performance.max = pl->pcie_lane;
+ if (pi->pcie_lane_performance.min > pl->pcie_lane)
+ pi->pcie_lane_performance.min = pl->pcie_lane;
+ break;
+ default:
+ break;
+ }
+}
+
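+/*
+ * Walk the ATOM PPLib state array and build a ci_ps for each power
+ * state, up to CISLANDS_MAX_HARDWARE_POWERLEVELS levels per state.
+ */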
+static int ci_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct ci_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+		if (!rdev->pm.power_state[i].clock_info) {
+			kfree(rdev->pm.dpm.ps);
+			return -EINVAL;
+		}
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ ci_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int ci_get_vbios_boot_values(struct radeon_device *rdev,
+ struct ci_vbios_boot_state *boot_state)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+ data_offset);
+ boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+ boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+ boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+ boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
+ boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
+ boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+ boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void ci_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ r600_free_extended_power_table(rdev);
+}
+
+int ci_dpm_init(struct radeon_device *rdev)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ u16 data_offset, size;
+ u8 frev, crev;
+ struct ci_power_info *pi;
+ int ret;
+ u32 mask;
+
+ pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret)
+ pi->sys_pcie_mask = 0;
+ else
+ pi->sys_pcie_mask = mask;
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+
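+	/*
+	 * min/max start out inverted so that the first parsed state
+	 * initializes the performance/powersaving ranges.
+	 */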
+ pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
+ pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
+
+ pi->pcie_lane_performance.max = 0;
+ pi->pcie_lane_performance.min = 16;
+ pi->pcie_lane_powersaving.max = 0;
+ pi->pcie_lane_powersaving.min = 16;
+
+ ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
+
+ pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+ pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+ pi->sclk_dpm_key_disabled = 0;
+ pi->mclk_dpm_key_disabled = 0;
+ pi->pcie_dpm_key_disabled = 0;
+
+ pi->caps_sclk_ds = true;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ pi->mclk_stutter_mode_threshold = 40000;
+ pi->mclk_edc_enable_threshold = 40000;
+ pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ci_initialize_powertune_defaults(rdev);
+
+ pi->caps_fps = false;
+
+ pi->caps_sclk_throttle_low_notification = false;
+
+ pi->caps_uvd_dpm = true;
+
+ ci_get_leakage_voltages(rdev);
+ ci_patch_dependency_tables_with_leakage(rdev);
+ ci_set_private_data_variables_based_on_pptable(rdev);
+
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ ci_dpm_fini(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+ rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
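+	/* thermal limits, in millidegrees C */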
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+
+ pi->uvd_enabled = false;
+
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+ }
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+ }
+
+ pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+ pi->pcie_performance_request =
+ radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+ pi->pcie_performance_request = false;
+#endif
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ pi->caps_sclk_ss_support = true;
+ pi->caps_mclk_ss_support = true;
+ pi->dynamic_ss = true;
+ } else {
+ pi->caps_sclk_ss_support = false;
+ pi->caps_mclk_ss_support = false;
+ pi->dynamic_ss = true;
+ }
+
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ pi->caps_dynamic_ac_timing = true;
+
+ pi->uvd_power_gated = false;
+
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ return 0;
+}
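+
+/*
+ * Note on the built-in dispclk/voltage table set up above (assuming
+ * the usual radeon DPM units of 10 kHz for clocks and mV for
+ * voltages): 360 MHz requires 720 mV, 540 MHz requires 810 mV and
+ * 720 MHz requires 900 mV, with entry 0 as the zero point.
+ */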
+
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ u32 sclk = ci_get_average_sclk_freq(rdev);
+ u32 mclk = ci_get_average_mclk_freq(rdev);
+
+ seq_printf(m, "power level avg sclk: %u mclk: %u\n",
+ sclk, mclk);
+}
+
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl;
+ int i;
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+ i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
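+
+/* Illustrative note: with low set, these two helpers report the first
+ * (lowest) performance level of the requested state; otherwise the
+ * last (highest) one. */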
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 00000000000..93bbed977ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+struct ci_pl {
+ u32 mclk;
+ u32 sclk;
+ enum radeon_pcie_gen pcie_gen;
+ u16 pcie_lane;
+};
+
+struct ci_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ u32 sclk_t;
+ struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+ bool enabled;
+ u32 value;
+ u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+ u32 count;
+ struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+ struct ci_single_dpm_table sclk_table;
+ struct ci_single_dpm_table mclk_table;
+ struct ci_single_dpm_table pcie_speed_table;
+ struct ci_single_dpm_table vddc_table;
+ struct ci_single_dpm_table vddci_table;
+ struct ci_single_dpm_table mvdd_table;
+};
+
+struct ci_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm {
+ bool supported;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT 8
+
+struct ci_leakage_voltage {
+ u16 count;
+ u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+ u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+ u32 uvd_dpm_enable_mask;
+ u32 vce_dpm_enable_mask;
+ u32 acp_dpm_enable_mask;
+ u32 samu_dpm_enable_mask;
+ u32 sclk_dpm_enable_mask;
+ u32 mclk_dpm_enable_mask;
+ u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state {
+ u16 mvdd_bootup_value;
+ u16 vddc_bootup_value;
+ u16 vddci_bootup_value;
+ u32 sclk_bootup_value;
+ u32 mclk_bootup_value;
+ u16 pcie_gen_bootup_value;
+ u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+ s32 temperature_low;
+ s32 temperature_high;
+ s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+ u16 max;
+ u16 min;
+};
+
+enum ci_pt_config_reg_type {
+ CISLANDS_CONFIGREG_MMR = 0,
+ CISLANDS_CONFIGREG_SMC_IND,
+ CISLANDS_CONFIGREG_DIDT_IND,
+ CISLANDS_CONFIGREG_CACHE,
+ CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct ci_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+
+struct ci_power_info {
+ struct ci_dpm_table dpm_table;
+ u32 voltage_control;
+ u32 mvdd_control;
+ u32 vddci_control;
+ u32 active_auto_throttle_sources;
+ struct ci_clock_registers clock_registers;
+ u16 acpi_vddc;
+ u16 acpi_vddci;
+ enum radeon_pcie_gen force_pcie_gen;
+ enum radeon_pcie_gen acpi_pcie_gen;
+ struct ci_leakage_voltage vddc_leakage;
+ struct ci_leakage_voltage vddci_leakage;
+ u16 max_vddc_in_pp_table;
+ u16 min_vddc_in_pp_table;
+ u16 max_vddci_in_pp_table;
+ u16 min_vddci_in_pp_table;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 mclk_edc_wr_enable_threshold;
+ struct ci_vbios_boot_state vbios_boot_state;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 fan_table_start;
+ u32 arb_table_start;
+ /* smc tables */
+ SMU7_Discrete_DpmTable smc_state_table;
+ SMU7_Discrete_MCRegisters smc_mc_reg_table;
+ SMU7_Discrete_PmFuses smc_powertune_table;
+ /* other stuff */
+ struct ci_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct ci_ulv_parm ulv;
+ u32 power_containment_features;
+ const struct ci_pt_defaults *powertune_defaults;
+ u32 dte_tj_offset;
+ bool vddc_phase_shed_control;
+ struct ci_thermal_temperature_setting thermal_temp_setting;
+ struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+ u32 need_update_smu7_dpm_table;
+ u32 sclk_dpm_key_disabled;
+ u32 mclk_dpm_key_disabled;
+ u32 pcie_dpm_key_disabled;
+ struct ci_pcie_perf_range pcie_gen_performance;
+ struct ci_pcie_perf_range pcie_lane_performance;
+ struct ci_pcie_perf_range pcie_gen_powersaving;
+ struct ci_pcie_perf_range pcie_lane_powersaving;
+ u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+ u32 mclk_activity_target;
+ u32 low_sclk_interrupt_t;
+ u32 last_mclk_dpm_enable_mask;
+ u32 sys_pcie_mask;
+ /* caps */
+ bool caps_power_containment;
+ bool caps_cac;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_fps;
+ bool caps_sclk_ds;
+ bool caps_sclk_ss_support;
+ bool caps_mclk_ss_support;
+ bool caps_uvd_dpm;
+ bool caps_vce_dpm;
+ bool caps_samu_dpm;
+ bool caps_acp_dpm;
+ bool caps_automatic_dc_transition;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_dynamic_ac_timing;
+ /* flags */
+ bool thermal_protection;
+ bool pcie_performance_request;
+ bool dynamic_ss;
+ bool dll_default_on;
+ bool cac_enabled;
+ bool uvd_enabled;
+ bool battery_state;
+ bool pspp_notify_required;
+ bool mem_gddr5;
+ bool enable_bapm_feature;
+ bool enable_tdc_limit_feature;
+ bool enable_pkg_pwr_tracking_feature;
+ bool use_pcie_performance_levels;
+ bool use_pcie_powersaving_levels;
+ bool uvd_power_gated;
+ /* driver states */
+ struct radeon_ps current_rps;
+ struct ci_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct ci_ps requested_ps;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
+
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
+
+#define CISLANDS_VRC_DFLT0 0x3FFFC000
+#define CISLANDS_VRC_DFLT1 0x000400
+#define CISLANDS_VRC_DFLT2 0xC00080
+#define CISLANDS_VRC_DFLT3 0xC00200
+#define CISLANDS_VRC_DFLT4 0xC01680
+#define CISLANDS_VRC_DFLT5 0xC00033
+#define CISLANDS_VRC_DFLT6 0xC00033
+#define CISLANDS_VRC_DFLT7 0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT 30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void ci_start_smc(struct radeon_device *rdev);
+void ci_reset_smc(struct radeon_device *rdev);
+int ci_program_jump_on_start(struct radeon_device *rdev);
+void ci_stop_smc_clock(struct radeon_device *rdev);
+void ci_start_smc_clock(struct radeon_device *rdev);
+bool ci_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit);
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 00000000000..53b43dd3cf1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "radeon_ucode.h"
+
+static int ci_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
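+
+/* Auto-increment is deliberately cleared here so that repeated
+ * SMC_IND_DATA_0 accesses hit the same dword; the ucode loader below
+ * enables it explicitly when streaming the firmware image. */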
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ u32 data, original_data;
+ u32 addr;
+ u32 extra_shift;
+ int ret;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
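+
+/*
+ * Worked example for the tail handling above (illustrative): given two
+ * trailing bytes 0xAA 0xBB at an aligned address, byte_count = 2 and
+ * extra_shift = 16; the loop packs data = 0xAABB, the shift yields
+ * 0xAABB0000, and the mask keeps the low 16 bits of original_data, so
+ * only the leading two bytes of the dword are rewritten.
+ */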
+
+void ci_start_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void ci_reset_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp |= RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int ci_program_jump_on_start(struct radeon_device *rdev)
+{
+ static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
+}
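+
+/* The four bytes written at SMC address 0 above form the jump stub the
+ * function name refers to; the limit of sizeof(data) + 1 comfortably
+ * covers the write to bytes 0..3. */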
+
+void ci_stop_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp |= CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void ci_start_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp &= ~CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool ci_is_smc_running(struct radeon_device *rdev)
+{
+ u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ u32 pc_c = RREG32_SMC(SMC_PC_C);
+
+ if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
+ return true;
+
+ return false;
+}
+
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
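+
+/*
+ * Minimal usage sketch (illustrative, with msg standing in for one of
+ * the PPSMC_MSG_* values from ppsmc.h):
+ *
+ *     if (ci_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
+ *             return -EINVAL;
+ */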
+
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
+{
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!rdev->smc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ src = (const u8 *)rdev->smc_fw->data;
+ WREG32(SMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
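+
+/* Byte-order note for the loader above: the SMC address space is
+ * big-endian, so source bytes 0x12 0x34 0x56 0x78 pack into the dword
+ * 0x12345678 regardless of host endianness. */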
+
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, value);
+ return 0;
+}
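+
+/*
+ * Illustrative sketch (not part of this interface): the two dword
+ * accessors above compose into a read-modify-write helper under the
+ * same alignment/limit rules:
+ *
+ * static int ci_rmw_smc_sram_dword(struct radeon_device *rdev, u32 addr,
+ *                                  u32 mask, u32 bits, u32 limit)
+ * {
+ *         u32 val;
+ *         int ret = ci_read_smc_sram_dword(rdev, addr, &val, limit);
+ *
+ *         if (ret)
+ *                 return ret;
+ *         val = (val & ~mask) | (bits & mask);
+ *         return ci_write_smc_sram_dword(rdev, addr, val, limit);
+ * }
+ */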
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 8928bd109c1..a3bba058727 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -30,22 +30,8 @@
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
-
-/* GFX */
-#define CIK_PFP_UCODE_SIZE 2144
-#define CIK_ME_UCODE_SIZE 2144
-#define CIK_CE_UCODE_SIZE 2144
-/* compute */
-#define CIK_MEC_UCODE_SIZE 4192
-/* interrupts */
-#define BONAIRE_RLC_UCODE_SIZE 2048
-#define KB_RLC_UCODE_SIZE 2560
-#define KV_RLC_UCODE_SIZE 2560
-/* gddr controller */
-#define CIK_MC_UCODE_SIZE 7866
-/* sdma */
-#define CIK_SDMA_UCODE_SIZE 1050
-#define CIK_SDMA_UCODE_VERSION 64
+#include "radeon_ucode.h"
+#include "clearstate_ci.h"
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -54,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -72,10 +59,61 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void si_rlc_fini(struct radeon_device *rdev);
-extern int si_rlc_init(struct radeon_device *rdev);
+extern void si_rlc_reset(struct radeon_device *rdev);
+extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+extern int cik_sdma_resume(struct radeon_device *rdev);
+extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
+extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
+static void cik_pcie_gen3_enable(struct radeon_device *rdev);
+static void cik_program_aspm(struct radeon_device *rdev);
+static void cik_init_pg(struct radeon_device *rdev);
+static void cik_init_cg(struct radeon_device *rdev);
+
+/* get temperature in millidegrees */
+int ci_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
+
+/* get temperature in millidegrees */
+int kv_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = RREG32_SMC(0xC0300E0C);
+
+ if (temp)
+ actual_temp = (temp / 8) - 49;
+ else
+ actual_temp = 0;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
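+
+/*
+ * Worked examples (illustrative): in ci_get_temp() the 9-bit CTF field
+ * saturates to 255 C when bit 0x200 is set, so a raw field of 0x6a
+ * reports 106000 millidegrees; in kv_get_temp() a raw reading of 1000
+ * reports (1000 / 8 - 49) * 1000 = 76000.
+ */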
/*
* Indirect registers accessor
@@ -98,6 +136,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_DATA);
}
+static const u32 spectre_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc178 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc278 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc27c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc280 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc284 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc288 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc29c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc778 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc77c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc780 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc784 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc788 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc78c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a4 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a8 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7ac >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92cc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
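+
+/*
+ * List format note (illustrative): each entry above packs a broadcast/
+ * instance selector in the high 16 bits and a register dword offset in
+ * the low 16 bits, normally followed by one 32-bit default value:
+ *
+ *     u32 reg_byte_offset = (entry & 0xffff) << 2;  // e.g. 0xc12c
+ *     u32 selector        = entry >> 16;            // e.g. 0x0e00
+ *
+ * The bare words 0x3 and 0x5 delimit sub-lists consumed by the RLC
+ * save/restore code; the exact selector semantics are hardware-specific.
+ */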
+
+static const u32 kalindi_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3e1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
static const u32 bonaire_golden_spm_registers[] =
{
0x30800, 0xe0ffffff, 0xe0000000
@@ -744,7 +1554,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size,
- sdma_req_size;
+ sdma_req_size, smc_req_size;
char fw_name[30];
int err;
@@ -760,6 +1570,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
mc_req_size = CIK_MC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
@@ -851,7 +1662,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
- /* No MC ucode on APUs */
+ /* No SMC or MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -863,6 +1674,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
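+
+ /* A missing SMC image is treated as non-fatal (err is cleared
+ * above) while a size mismatch still fails; later consumers such
+ * as ci_load_smc_ucode() check rdev->smc_fw for NULL. */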
}
out:
@@ -881,6 +1707,8 @@ out:
rdev->rlc_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
}
return err;
}
@@ -1880,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev)
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KAVERI:
- /* TODO */
+ rdev->config.cik.max_shader_engines = 1;
+ rdev->config.cik.max_tile_pipes = 4;
+ if ((rdev->pdev->device == 0x1304) ||
+ (rdev->pdev->device == 0x1305) ||
+ (rdev->pdev->device == 0x130C) ||
+ (rdev->pdev->device == 0x130F) ||
+ (rdev->pdev->device == 0x1310) ||
+ (rdev->pdev->device == 0x1311) ||
+ (rdev->pdev->device == 0x131C)) {
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1309) ||
+ (rdev->pdev->device == 0x130A) ||
+ (rdev->pdev->device == 0x130D) ||
+ (rdev->pdev->device == 0x1313)) {
+ rdev->config.cik.max_cu_per_sh = 6;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1306) ||
+ (rdev->pdev->device == 0x1307) ||
+ (rdev->pdev->device == 0x130B) ||
+ (rdev->pdev->device == 0x130E) ||
+ (rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x131B)) {
+ rdev->config.cik.max_cu_per_sh = 4;
+ rdev->config.cik.max_backends_per_se = 1;
+ } else {
+ rdev->config.cik.max_cu_per_sh = 3;
+ rdev->config.cik.max_backends_per_se = 1;
+ }
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_texture_channel_caches = 4;
+ rdev->config.cik.max_gprs = 256;
+ rdev->config.cik.max_gs_threads = 16;
+ rdev->config.cik.max_hw_contexts = 8;
+
+ rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
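+ /*
+ * Summary of the KAVERI device-id buckets above
+ * (max_cu_per_sh / max_backends_per_se):
+ *     0x1304/05/0C/0F/10/11/1C -> 8 / 2
+ *     0x1309/0A/0D/13          -> 6 / 2
+ *     0x1306/07/0B/0E/15/1B    -> 4 / 1
+ *     any other KAVERI id      -> 3 / 1
+ */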
break;
case CHIP_KABINI:
default:
@@ -2535,8 +3402,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2593,7 +3460,6 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
}
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -2612,7 +3478,6 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
}
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -2620,10 +3485,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;
-
- rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
- WDOORBELL32(ring->doorbell_offset, wptr);
+ rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+ WDOORBELL32(ring->doorbell_offset, ring->wptr);
}
/**
@@ -2920,7 +3783,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(CP_HPD_EOP_CONTROL);
tmp &= ~EOP_SIZE_MASK;
- tmp |= drm_order(MEC_HPD_SIZE / 8);
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3037,9 +3900,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
mqd->queue_state.cp_hqd_pq_control |=
- drm_order(rdev->ring[idx].ring_size / 8);
+ order_base_2(rdev->ring[idx].ring_size / 8);
mqd->queue_state.cp_hqd_pq_control |=
- (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+ (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif
@@ -3150,13 +4013,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
- /* Reset all cp blocks */
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -3171,579 +4027,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * sDMA - System DMA
- * Starting with CIK, the GPU has new asynchronous
- * DMA engines. These engines are used for compute
- * and gfx. There are two DMA engines (SDMA0, SDMA1)
- * and each one supports 1 ring buffer used for gfx
- * and 2 queues used for compute.
- *
- * The programming model is very similar to the CP
- * (ring buffer, IBs, etc.), but sDMA has it's own
- * packet format that is different from the PM4 format
- * used by the CP. sDMA supports copying data, writing
- * embedded data, solid fills, and a number of other
- * things. It also has support for tiling/detiling of
- * buffers.
- */
-/**
- * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (CIK).
- */
-void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 5;
- while ((next_rptr & 7) != 4)
- next_rptr++;
- next_rptr += 4;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, next_rptr);
- }
-
- /* IB packet must end on a 8 DW boundary */
- while ((ring->wptr & 7) != 4)
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
- radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, ib->length_dw);
-
-}
-
-/**
- * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (CIK).
- */
-void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (fence->ring == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- /* write the fence */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-}
-
-/**
- * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (CIK).
- */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
- radeon_ring_write(ring, addr & 0xfffffff8);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-}
-
-/**
- * cik_sdma_gfx_stop - stop the gfx async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the gfx async dma ring buffers (CIK).
- */
-static void cik_sdma_gfx_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl, reg_offset;
- int i;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
- rb_cntl &= ~SDMA_RB_ENABLE;
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
- }
-}
-
-/**
- * cik_sdma_rlc_stop - stop the compute async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the compute async dma queues (CIK).
- */
-static void cik_sdma_rlc_stop(struct radeon_device *rdev)
-{
- /* XXX todo */
-}
-
-/**
- * cik_sdma_enable - stop the async dma engines
- *
- * @rdev: radeon_device pointer
- * @enable: enable/disable the DMA MEs.
- *
- * Halt or unhalt the async dma engines (CIK).
- */
-static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
-{
- u32 me_cntl, reg_offset;
- int i;
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
- if (enable)
- me_cntl &= ~SDMA_HALT;
- else
- me_cntl |= SDMA_HALT;
- WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
- }
-}
-
-/**
- * cik_sdma_gfx_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the gfx DMA ring buffers and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_gfx_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = SDMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = SDMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
- WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
-
- ring->wptr = 0;
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
- /* enable DMA RB */
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
-
- ib_cntl = SDMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= SDMA_IB_SWAP_ENABLE;
-#endif
- /* enable DMA IBs */
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cik_sdma_rlc_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the compute DMA queues and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_rlc_resume(struct radeon_device *rdev)
-{
- /* XXX todo */
- return 0;
-}
-
-/**
- * cik_sdma_load_microcode - load the sDMA ME ucode
- *
- * @rdev: radeon_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int cik_sdma_load_microcode(struct radeon_device *rdev)
-{
- const __be32 *fw_data;
- int i;
-
- if (!rdev->sdma_fw)
- return -EINVAL;
-
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
-
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- return 0;
-}
-
-/**
- * cik_sdma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA engines and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_resume(struct radeon_device *rdev)
-{
- int r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
- r = cik_sdma_load_microcode(rdev);
- if (r)
- return r;
-
- /* unhalt the MEs */
- cik_sdma_enable(rdev, true);
-
- /* start the gfx rings and rlc compute queues */
- r = cik_sdma_gfx_resume(rdev);
- if (r)
- return r;
- r = cik_sdma_rlc_resume(rdev);
- if (r)
- return r;
-
- return 0;
-}
-
-/**
- * cik_sdma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (CIK).
- */
-static void cik_sdma_fini(struct radeon_device *rdev)
-{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
- /* XXX - compute dma queue tear down */
-}
-
-/**
- * cik_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (CIK).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0x1fffff)
- cur_size_in_bytes = 0x1fffff;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, cur_size_in_bytes);
- radeon_ring_write(ring, 0); /* src/dst endian swap */
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
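
/*
 * The copy above is split into packets of at most 0x1fffff bytes; a
 * self-contained model of that chunking arithmetic (the printf stands
 * in for emitting one ring packet per chunk):
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK 0x1fffff	/* per-packet byte limit, as above */

static void emit_copy_chunks(uint64_t src, uint64_t dst, uint64_t size)
{
	while (size) {
		uint32_t cur = size > MAX_CHUNK ? MAX_CHUNK : (uint32_t)size;

		printf("copy %u bytes: %#llx -> %#llx\n", (unsigned int)cur,
		       (unsigned long long)src, (unsigned long long)dst);
		src += cur;
		dst += cur;
		size -= cur;
	}
}
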
-
-/**
- * cik_sdma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by using it to write a
- * value to memory (CIK).
- * Returns 0 for success, error for failure.
- */
-int cik_sdma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
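
/*
 * The ring test above (and the IB test that follows) share one shape:
 * preload a sentinel (0xCAFEDEAD), ask the engine to overwrite it
 * (0xDEADBEEF), then spin with a microsecond timeout.  A generic
 * sketch of that polling step, with a caller-supplied delay hook in
 * place of DRM_UDELAY():
 */
#include <stdbool.h>
#include <stdint.h>

static bool poll_for_value(volatile uint32_t *ptr, uint32_t expected,
			   unsigned int usec_timeout,
			   void (*delay_us)(unsigned int))
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (*ptr == expected)
			return true;	/* engine wrote the value */
		delay_us(1);
	}
	return false;			/* timed out */
}
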
-
-/**
- * cik_sdma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (CIK).
- * Returns 0 on success, error on failure.
- */
-int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
- ib.ptr[3] = 1;
- ib.ptr[4] = 0xDEADBEEF;
- ib.length_dw = 5;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-
static void cik_print_gpu_status_regs(struct radeon_device *rdev)
{
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -3793,7 +4076,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
* mask to be used by cik_gpu_soft_reset().
* Returns a mask of the blocks to be reset.
*/
-static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -4044,34 +4327,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cik_sdma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up (CIK).
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cik_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
/**
* cik_mc_program - program the GPU memory controller
@@ -4608,131 +4863,8 @@ void cik_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
- }
-}
-
-/**
- * cik_dma_vm_flush - cik vm flush using sDMA
- *
- * @rdev: radeon_device pointer
- *
- * Update the page table base and flush the VM TLB
- * using sDMA (CIK).
- */
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (vm == NULL)
- return;
-
- if (ridx == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm->id < 8) {
- radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
- } else {
- radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* update SH_MEM_* regs */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(vm->id));
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_BASES >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(0));
-
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-
- /* flush TLB */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 1 << vm->id);
}
/*
@@ -4741,31 +4873,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
* variety of functions, the most important of which is
* the interrupt controller.
*/
-/**
- * cik_rlc_stop - stop the RLC ME
- *
- * @rdev: radeon_device pointer
- *
- * Halt the RLC ME (MicroEngine) (CIK).
- */
-static void cik_rlc_stop(struct radeon_device *rdev)
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable)
{
- int i, j, k;
- u32 mask, tmp;
+ u32 tmp = RREG32(CP_INT_CNTL_RING0);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (enable)
+ tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ else
+ tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
+}
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp;
- tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
- WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+ tmp = RREG32(RLC_LB_CNTL);
+ if (enable)
+ tmp |= LOAD_BALANCE_ENABLE;
+ else
+ tmp &= ~LOAD_BALANCE_ENABLE;
+ WREG32(RLC_LB_CNTL, tmp);
+}
- WREG32(RLC_CNTL, 0);
+static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
+{
+ u32 i, j, k;
+ u32 mask;
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
@@ -4787,6 +4922,84 @@ static void cik_rlc_stop(struct radeon_device *rdev)
}
}
+static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_CNTL);
+ if (tmp != rlc)
+ WREG32(RLC_CNTL, rlc);
+}
+
+static u32 cik_halt_rlc(struct radeon_device *rdev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_CNTL);
+
+ if (data & RLC_ENABLE) {
+ u32 i;
+
+ data &= ~RLC_ENABLE;
+ WREG32(RLC_CNTL, data);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
+ break;
+ udelay(1);
+ }
+
+ cik_wait_for_rlc_serdes(rdev);
+ }
+
+ return orig;
+}
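
/*
 * cik_halt_rlc() returns the original RLC_CNTL value so a caller can
 * reprogram the serdes while the RLC is quiescent and then restore the
 * saved state, exactly as cik_enable_cgcg()/cik_enable_mgcg() below do:
 *
 *	u32 saved = cik_halt_rlc(rdev);
 *	... write the RLC_SERDES_WR_* registers ...
 *	cik_update_rlc(rdev, saved);	// writes back only if changed
 */
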
+
+void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i, mask;
+
+ tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+
+ mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & mask) == mask)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+}
+
+/**
+ * cik_rlc_stop - stop the RLC ME
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Halt the RLC ME (MicroEngine) (CIK).
+ */
+static void cik_rlc_stop(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, 0);
+
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ cik_wait_for_rlc_serdes(rdev);
+}
+
/**
* cik_rlc_start - start the RLC ME
*
@@ -4796,13 +5009,9 @@ static void cik_rlc_stop(struct radeon_device *rdev)
*/
static void cik_rlc_start(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(RLC_CNTL, RLC_ENABLE);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ cik_enable_gui_idle_interrupt(rdev, true);
udelay(50);
}
@@ -4818,8 +5027,7 @@ static void cik_rlc_start(struct radeon_device *rdev)
*/
static int cik_rlc_resume(struct radeon_device *rdev)
{
- u32 i, size;
- u32 clear_state_info[3];
+ u32 i, size, tmp;
const __be32 *fw_data;
if (!rdev->rlc_fw)
@@ -4840,12 +5048,15 @@ static int cik_rlc_resume(struct radeon_device *rdev)
cik_rlc_stop(rdev);
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
+ /* disable CG */
+ tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
+ WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+
+ si_rlc_reset(rdev);
+
+ cik_init_pg(rdev);
+
+ cik_init_cg(rdev);
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
@@ -4864,20 +5075,757 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(RLC_GPM_UCODE_ADDR, 0);
- /* XXX */
- clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
- clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
- clear_state_info[2] = 0;//cik_default_size;
- WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
- for (i = 0; i < 3; i++)
- WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
- WREG32(RLC_DRIVER_DMA_STATUS, 0);
+ /* XXX - find out what chips support lbpw */
+ cik_enable_lbpw(rdev, false);
+
+ if (rdev->family == CHIP_BONAIRE)
+ WREG32(RLC_DRIVER_DMA_STATUS, 0);
cik_rlc_start(rdev);
return 0;
}
+static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp, tmp2;
+
+ orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ cik_enable_gui_idle_interrupt(rdev, true);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
+ WREG32(RLC_SERDES_WR_CTRL, tmp2);
+
+ cik_update_rlc(rdev, tmp);
+
+ data |= CGCG_EN | CGLS_EN;
+ } else {
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ data &= ~(CGCG_EN | CGLS_EN);
+ }
+
+ if (orig != data)
+ WREG32(RLC_CGCG_CGLS_CTRL, data);
+
+}
+
+static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+ }
+
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data &= 0xfffffffd;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data &= ~SM_MODE_MASK;
+ data |= SM_MODE(0x2);
+ data |= SM_MODE_ENABLE;
+ data &= ~CGTS_OVERRIDE;
+ if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
+ (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
+ data &= ~CGTS_LS_OVERRIDE;
+ data &= ~ON_MONITOR_ADD_MASK;
+ data |= ON_MONITOR_ADD_EN;
+ data |= ON_MONITOR_ADD(0x96);
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+ }
+ } else {
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000002;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(RLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_LS_EN) {
+ data &= ~RLC_MEM_LS_EN;
+ WREG32(RLC_MEM_SLP_CNTL, data);
+ }
+
+ data = RREG32(CP_MEM_SLP_CNTL);
+ if (data & CP_MEM_LS_EN) {
+ data &= ~CP_MEM_LS_EN;
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+ }
+}
+
+static const u32 mc_cg_registers[] =
+{
+ MC_HUB_MISC_HUB_CG,
+ MC_HUB_MISC_SIP_CG,
+ MC_HUB_MISC_VM_CG,
+ MC_XPB_CLK_GAT,
+ ATC_MISC_CG,
+ MC_CITF_MISC_WR_CG,
+ MC_CITF_MISC_RD_CG,
+ MC_CITF_MISC_VM_CG,
+ VM_L2_CG,
+};
+
+static void cik_enable_mc_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
+ data |= MC_LS_ENABLE;
+ else
+ data &= ~MC_LS_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void cik_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
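
/*
 * Both MC helpers above apply the same conditional read-modify-write
 * over the shared register list, writing back only when the value
 * actually changes.  Generic form of that loop (rd()/wr() are
 * stand-ins for RREG32()/WREG32(), not driver API):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void rmw_reg_list(uint32_t (*rd)(uint32_t),
			 void (*wr)(uint32_t, uint32_t),
			 const uint32_t *regs, size_t n,
			 uint32_t bits, bool set)
{
	size_t i;

	for (i = 0; i < n; i++) {
		uint32_t orig = rd(regs[i]);
		uint32_t data = set ? (orig | bits) : (orig & ~bits);

		if (data != orig)	/* skip the write if unchanged */
			wr(regs[i], data);
	}
}
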
+
+static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+ } else {
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_sdma_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ } else {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data = 0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data |= DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data &= ~DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ }
+}
+
+static void cik_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+}
+
+static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void cik_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+
+void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
+{
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ cik_enable_mgcg(rdev, true);
+ cik_enable_cgcg(rdev, true);
+ } else {
+ cik_enable_cgcg(rdev, false);
+ cik_enable_mgcg(rdev, false);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_MC) {
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ cik_enable_mc_mgcg(rdev, enable);
+ cik_enable_mc_ls(rdev, enable);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ cik_enable_sdma_mgcg(rdev, enable);
+ cik_enable_sdma_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_BIF) {
+ cik_enable_bif_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd)
+ cik_enable_uvd_mgcg(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_HDP) {
+ cik_enable_hdp_mgcg(rdev, enable);
+ cik_enable_hdp_ls(rdev, enable);
+ }
+}
+
+static void cik_init_cg(struct radeon_device *rdev)
+{
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
+
+ if (rdev->has_uvd)
+ si_init_uvd_internal_cg(rdev);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+}
+
+static void cik_fini_cg(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
+}
+
+static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
+ data &= ~DISABLE_CP_PG;
+ else
+ data |= DISABLE_CP_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
+ data &= ~DISABLE_GDS_PG;
+ else
+ data |= DISABLE_GDS_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define CP_ME_TABLE_SIZE 96
+#define CP_ME_TABLE_OFFSET 2048
+#define CP_MEC_TABLE_OFFSET 4096
+
+void cik_init_cp_pg_table(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset;
+
+ if (rdev->family == CHIP_KAVERI)
+ max_me = 5;
+
+ if (rdev->rlc.cp_table_ptr == NULL)
+ return;
+
+ /* write the cp table buffer */
+ dst_ptr = rdev->rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
+
+ for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
+ dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+ }
+ bo_offset += CP_ME_TABLE_SIZE;
+ }
+}
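
/*
 * Layout model of the table written above: each micro-engine
 * contributes CP_ME_TABLE_SIZE dwords, pulled from a fixed offset in
 * its firmware image and packed back to back in the BO.  Sketch with
 * pre-converted (cpu-endian) firmware words:
 */
#include <stdint.h>

#define TBL_DWORDS 96	/* CP_ME_TABLE_SIZE */

static void pack_pg_tables(uint32_t *dst, const uint32_t *const fw[],
			   const uint32_t fw_off[], int n_me)
{
	int me, i;

	for (me = 0; me < n_me; me++)
		for (i = 0; i < TBL_DWORDS; i++)
			dst[me * TBL_DWORDS + i] = fw[me][fw_off[me] + i];
}
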
+
+static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data |= AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+ } else {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data &= ~GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+ data = RREG32(DB_RENDER_CONTROL);
+ }
+}
+
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ cik_select_se_sh(rdev, se, sh);
+ tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ tmp &= 0xffff0000;
+
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
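
/*
 * The bit arithmetic above inverts a "disabled CU" bitmap and clips it
 * to max_cu_per_sh bits; the same computation in isolation:
 */
#include <stdint.h>

static uint32_t active_cu_mask(uint32_t disabled_bits, unsigned int max_cu)
{
	uint32_t mask = (max_cu >= 32) ? 0xffffffffu
				       : ((1u << max_cu) - 1u);

	return ~disabled_bits & mask;	/* set bit => CU is active */
}
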
+
+static void cik_init_ao_cu_mask(struct radeon_device *rdev)
+{
+ u32 i, j, k, active_cu_number = 0;
+ u32 mask, counter, cu_bitmap;
+ u32 tmp = 0;
+
+ for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+ for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
+ if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
+ if (counter < 2)
+ cu_bitmap |= mask;
+ counter ++;
+ }
+ mask <<= 1;
+ }
+
+ active_cu_number += counter;
+ tmp |= (cu_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+ tmp = RREG32(RLC_MAX_PG_CU);
+ tmp &= ~MAX_PU_CU_MASK;
+ tmp |= MAX_PU_CU(active_cu_number);
+ WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
+ data |= STATIC_PER_CU_PG_ENABLE;
+ else
+ data &= ~STATIC_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
+ data |= DYN_PER_CU_PG_ENABLE;
+ else
+ data &= ~DYN_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void cik_init_gfx_cgpg(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ u32 i;
+
+ if (rdev->rlc.cs_data) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
+ } else {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ for (i = 0; i < 3; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, 0);
+ }
+ if (rdev->rlc.reg_list) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
+ }
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_SRC;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
+
+ data = RREG32(CP_RB_WPTR_POLL_CNTL);
+ data &= ~IDLE_POLL_COUNT_MASK;
+ data |= IDLE_POLL_COUNT(0x60);
+ WREG32(CP_RB_WPTR_POLL_CNTL, data);
+
+ data = 0x10101010;
+ WREG32(RLC_PG_DELAY, data);
+
+ data = RREG32(RLC_PG_DELAY_2);
+ data &= ~0xff;
+ data |= 0x3;
+ WREG32(RLC_PG_DELAY_2, data);
+
+ data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~GRBM_REG_SGIT_MASK;
+ data |= GRBM_REG_SGIT(0x700);
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+}
+
+static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
+{
+ cik_enable_gfx_cgpg(rdev, enable);
+ cik_enable_gfx_static_mgpg(rdev, enable);
+ cik_enable_gfx_dynamic_mgpg(rdev, enable);
+}
+
+u32 cik_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config/pa_sc_raster_config1 */
+ count += 4;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ buffer[count++] = 0x16000012;
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KAVERI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KABINI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ buffer[count++] = 0x00000000;
+ break;
+ }
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
+}
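
/*
 * Hypothetical caller sketch (not driver code): the two CSB helpers
 * above must agree, i.e. cik_get_csb_buffer() emits exactly
 * cik_get_csb_size() dwords into a buffer the caller sized with it:
 *
 *	u32 dws = cik_get_csb_size(rdev);
 *	// allocate and map a BO of dws * 4 bytes as volatile u32 *buf
 *	cik_get_csb_buffer(rdev, buf);
 */
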
+
+static void cik_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_enable_sck_slowdown_on_pu(rdev, true);
+ cik_enable_sck_slowdown_on_pd(rdev, true);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_init_gfx_cgpg(rdev);
+ cik_enable_cp_pg(rdev, true);
+ cik_enable_gds_pg(rdev, true);
+ }
+ cik_init_ao_cu_mask(rdev);
+ cik_update_gfx_pg(rdev, true);
+ }
+}
+
+static void cik_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_update_gfx_pg(rdev, false);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_enable_cp_pg(rdev, false);
+ cik_enable_gds_pg(rdev, false);
+ }
+ }
+}
+
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
@@ -5040,7 +5988,7 @@ static int cik_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -5096,6 +6044,7 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
+ u32 thermal_int;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -5128,6 +6077,13 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+ ~(THERM_INTH_MASK | THERM_INTL_MASK);
+ else
+ thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+ ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -5285,6 +6241,14 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.dpm_thermal) {
+ DRM_DEBUG("dpm thermal\n");
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+ else
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ }
+
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -5319,6 +6283,11 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ if (rdev->flags & RADEON_IS_IGP)
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+ else
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+
return 0;
}
@@ -5530,6 +6499,7 @@ int cik_irq_process(struct radeon_device *rdev)
bool queue_hotplug = false;
bool queue_reset = false;
u32 addr, status, mc_client;
+ bool queue_thermal = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -5763,6 +6733,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -5880,6 +6854,19 @@ restart_ih:
break;
}
break;
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ rdev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ rdev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
case 241: /* SDMA Privileged inst */
case 247: /* SDMA Privileged inst */
DRM_ERROR("Illegal instruction in SDMA command stream\n");
@@ -5919,9 +6906,6 @@ restart_ih:
break;
}
break;
- case 233: /* GUI IDLE */
- DRM_DEBUG("IH: GUI idle\n");
- break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -5935,6 +6919,8 @@ restart_ih:
schedule_work(&rdev->hotplug_work);
if (queue_reset)
schedule_work(&rdev->reset_work);
+ if (queue_thermal)
+ schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
@@ -5964,6 +6950,16 @@ static int cik_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
+ /* enable pcie gen2/3 link */
+ cik_pcie_gen3_enable(rdev);
+ /* enable aspm */
+ cik_program_aspm(rdev);
+
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
cik_mc_program(rdev);
if (rdev->flags & RADEON_IS_IGP) {
@@ -5993,17 +6989,26 @@ static int cik_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
r = cik_pcie_gart_enable(rdev);
if (r)
return r;
cik_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family == CHIP_KAVERI) {
+ rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
+ } else {
+ rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
+ }
+ }
+ rdev->rlc.cs_data = ci_cs_data;
+ rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6051,12 +7056,15 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
- r = cik_uvd_resume(rdev);
+ r = radeon_uvd_resume(rdev);
if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ r = uvd_v4_2_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
}
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
@@ -6079,7 +7087,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6088,7 +7096,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -6100,7 +7108,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
/* dGPU only have 1 MEC */
@@ -6113,7 +7121,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6121,7 +7129,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6135,12 +7143,11 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6157,6 +7164,10 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6202,11 +7213,14 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_suspend(rdev);
radeon_wb_disable(rdev);
cik_pcie_gart_disable(rdev);
@@ -6327,7 +7341,7 @@ int cik_init(struct radeon_device *rdev)
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -6362,14 +7376,16 @@ void cik_fini(struct radeon_device *rdev)
{
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -6398,8 +7414,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
struct drm_display_mode *mode)
{
- u32 tmp;
-
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
  	 * There are 6 line buffers, one for each display controller.
@@ -6409,22 +7425,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
* them using the stereo blender.
*/
if (radeon_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920)
+ if (mode->crtc_hdisplay < 1920) {
tmp = 1;
- else if (mode->crtc_hdisplay < 2560)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 2560) {
tmp = 2;
- else if (mode->crtc_hdisplay < 4096)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 4096) {
tmp = 0;
- else {
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
+ } else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
tmp = 0;
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
}
- } else
+ } else {
tmp = 1;
+ buffer_alloc = 0;
+ }
WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -6826,7 +7857,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
- struct dce8_wm_params wm;
+ struct dce8_wm_params wm_low, wm_high;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -6836,35 +7867,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
- wm.yclk = rdev->pm.current_mclk * 10;
- wm.sclk = rdev->pm.current_sclk * 10;
- wm.disp_clk = mode->clock;
- wm.src_width = mode->crtc_hdisplay;
- wm.active_time = mode->crtc_hdisplay * pixel_period;
- wm.blank_time = line_time - wm.active_time;
- wm.interlaced = false;
+ /* watermark for high clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_high.yclk =
+ radeon_dpm_get_mclk(rdev, false) * 10;
+ wm_high.sclk =
+ radeon_dpm_get_sclk(rdev, false) * 10;
+ } else {
+ wm_high.yclk = rdev->pm.current_mclk * 10;
+ wm_high.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm.interlaced = true;
- wm.vsc = radeon_crtc->vsc;
- wm.vtaps = 1;
+ wm_high.interlaced = true;
+ wm_high.vsc = radeon_crtc->vsc;
+ wm_high.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
- wm.vtaps = 2;
- wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm.lb_size = lb_size;
- wm.dram_channels = cik_get_number_of_dram_channels(rdev);
- wm.num_heads = num_heads;
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_high.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce8_check_latency_hiding(&wm_high) ||
+ (rdev->disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ }
+
+ /* watermark for low clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_low.yclk =
+ radeon_dpm_get_mclk(rdev, true) * 10;
+ wm_low.sclk =
+ radeon_dpm_get_sclk(rdev, true) * 10;
+ } else {
+ wm_low.yclk = rdev->pm.current_mclk * 10;
+ wm_low.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = radeon_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_low.num_heads = num_heads;
+
/* set for low clocks */
- /* wm.yclk = low clk; wm.sclk = low clk */
- latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
- if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
- !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
- !dce8_check_latency_hiding(&wm) ||
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce8_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
@@ -6889,6 +7967,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
+
+ /* save values for DPM */
+ radeon_crtc->line_time = line_time;
+ radeon_crtc->wm_high = latency_watermark_a;
+ radeon_crtc->wm_low = latency_watermark_b;
}
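
/*
 * The watermark rework above computes the same dce8_wm_params twice,
 * once at the DPM high clock point and once at the low point; only the
 * yclk/sclk inputs differ.  Shape of the duplicated computation
 * (fill_wm() is a hypothetical helper; the driver inlines it):
 *
 *	fill_wm(&wm_high, mclk_hi, sclk_hi, mode, lb_size, num_heads);
 *	fill_wm(&wm_low,  mclk_lo, sclk_lo, mode, lb_size, num_heads);
 *	wm_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
 *	wm_b = min(dce8_latency_watermark(&wm_low),  (u32)65535);
 */
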
/**
@@ -6978,39 +8061,307 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return r;
}
-int cik_uvd_resume(struct radeon_device *rdev)
+static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
- uint64_t addr;
- uint32_t size;
- int r;
+ struct pci_dev *root = rdev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+ }
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+ gpu_pos = pci_pcie_cap(rdev->pdev);
+ if (!gpu_pos)
+ return;
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+ if (mask & DRM_PCIE_SPEED_80) {
+ /* re-try equalization if gen3 is not already enabled */
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+ current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & LC_RENEGOTIATION_SUPPORT) {
+ tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+ tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+ tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
- return 0;
+ for (i = 0; i < 10; i++) {
+ /* check status */
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_REDO_EQ;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ /* linkctl */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ /* linkctl2 */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp &= ~LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ /* set the link speed */
+ speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3; /* gen3 */
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2; /* gen2 */
+ else
+ tmp16 |= 1; /* gen1 */
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void cik_program_aspm(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (radeon_aspm == 0)
+ return;
+
+ /* XXX double check IGPs */
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ data &= ~LC_XMIT_N_FTS_MASK;
+ data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+ data |= LC_GO_TO_RECOVERY;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
+ data |= P_IGNORE_EDB_ERR;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+ data |= LC_PMI_TO_L1_DIS;
+ if (!disable_l0s)
+ data |= LC_L0S_INACTIVITY(7);
+
+ if (!disable_l1) {
+ data |= LC_L1_INACTIVITY(7);
+ data &= ~LC_PMI_TO_L1_DIS;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+ data |= LC_DYN_LANES_PWR_STATE(3);
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+ if (!disable_clkreq) {
+ struct pci_dev *root = rdev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+ data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+ orig = data = RREG32_SMC(THM_CLK_CNTL);
+ data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+ data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(THM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(MISC_CLK_CTRL);
+ data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+ data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(MISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
+ data &= ~BCLK_AS_XCLK;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
+ data &= ~FORCE_BIF_REFCLK_EN;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_CLKOUT_SEL_MASK;
+ data |= MPLL_CLKOUT_SEL(4);
+ if (orig != data)
+ WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+ data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f..ca1bb613358 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
#ifndef __CIK_REG_H__
#define __CIK_REG_H__
+#define CIK_DIDT_IND_INDEX 0xca00
+#define CIK_DIDT_IND_DATA 0xca04
+
#define CIK_DC_GPIO_HPD_MASK 0x65b0
#define CIK_DC_GPIO_HPD_A 0x65b4
#define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 00000000000..b6286068e11
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/* sdma */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
+
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines. These engines are used for compute
+ * and gfx. There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things. It also has support for tiling/detiling of
+ * buffers.
+ */
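+
+/* Every sDMA packet starts with a single header DW.  As encoded by the
+ * SDMA_PACKET() macro used throughout this file, the layout is assumed
+ * to be:
+ *
+ *	bits  0:7  - opcode (e.g. SDMA_OPCODE_WRITE)
+ *	bits  8:15 - sub-opcode (e.g. SDMA_WRITE_SUB_OPCODE_LINEAR)
+ *	bits 16:31 - packet-specific extra bits
+ *
+ * followed by a packet-specific number of payload DWs.
+ */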
+
+/**
+ * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 5;
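+		/* The value reported back is where the rptr will be once
+		 * this 5-DW write packet and the IB packet below have been
+		 * fetched: skip the padding NOPs up to the next (8n + 4)
+		 * DW offset, then the 4-DW indirect buffer packet itself.
+		 */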
+ while ((next_rptr & 7) != 4)
+ next_rptr++;
+ next_rptr += 4;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, next_rptr);
+ }
+
+	/* IB packet must end on an 8 DW boundary */
+ while ((ring->wptr & 7) != 4)
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+ radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * seq number, followed by a DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (fence->ring == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ /* write the fence */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+ /* flush HDP */
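+	/* POLL_REG_MEM with EXTRA_OP(1)/FUNC(3) presumably writes the
+	 * reference value to GPU_HDP_FLUSH_REQ and then polls
+	 * GPU_HDP_FLUSH_DONE until (value & MASK) == REFERENCE.
+	 */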
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+}
+
+/**
+ * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (CIK).
+ */
+void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+ radeon_ring_write(ring, addr & 0xfffffff8);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl, reg_offset;
+ int i;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
+ rb_cntl &= ~SDMA_RB_ENABLE;
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ }
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct radeon_device *rdev)
+{
+ /* XXX todo */
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+{
+ u32 me_cntl, reg_offset;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
+ if (enable)
+ me_cntl &= ~SDMA_HALT;
+ else
+ me_cntl |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
+ }
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
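+		/* the RB_CNTL size field is log2 of the ring size in DWs
+		 * and starts at bit 1, hence the shift */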
+#ifdef __BIG_ENDIAN
+ rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+ WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
+
+ /* enable DMA RB */
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
+
+ ib_cntl = SDMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= SDMA_IB_SWAP_ENABLE;
+#endif
+ /* enable DMA IBs */
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct radeon_device *rdev)
+{
+ /* XXX todo */
+ return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->sdma_fw)
+ return -EINVAL;
+
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ return 0;
+}
+
+/**
+ * cik_sdma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+
+ r = cik_sdma_load_microcode(rdev);
+ if (r)
+ return r;
+
+ /* unhalt the MEs */
+ cik_sdma_enable(rdev, true);
+
+ /* start the gfx rings and rlc compute queues */
+ r = cik_sdma_gfx_resume(rdev);
+ if (r)
+ return r;
+ r = cik_sdma_rlc_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * cik_sdma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (CIK).
+ */
+void cik_sdma_fini(struct radeon_device *rdev)
+{
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+ /* XXX - compute dma queue tear down */
+}
+
+/**
+ * cik_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (CIK).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
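+	/* each linear copy packet below moves at most 0x1fffff bytes and
+	 * is 7 DWs long; the extra 14 DWs of ring space leave room for
+	 * the optional semaphore sync and the fence/trap/HDP flush packets */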
+ r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, cur_size_in_bytes);
+ radeon_ring_write(ring, 0); /* src/dst endian swap */
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+/**
+ * cik_sdma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * cik_sdma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+ ib.ptr[3] = 1;
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * cik_sdma_is_lockup - check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cik_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cik_sdma_vm_set_page - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
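+	/* Two encodings are used below: system pages go out as a linear
+	 * WRITE packet carrying one 64-bit PTE per entry (2 DWs each, at
+	 * most 0xFFFFE DWs per packet), while contiguous vram ranges use
+	 * GENERATE_PTE_PDE, which has the engine expand base + n * incr
+	 * itself (at most 0x7FFFF entries per packet).
+	 */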
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
+/**
+ * cik_dma_vm_flush - cik vm flush using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: ring index to emit the flush on
+ * @vm: radeon_vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (vm == NULL)
+ return;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
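+	/* SRBM_WRITE packets: DW1 is the register offset in DWs, DW2 the
+	 * value to write; the 0xf000 in the header is presumably a
+	 * byte-enable field (all four bytes).
+	 */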
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ } else {
+ radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* update SH_MEM_* regs */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(vm->id));
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_BASES >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
+ radeon_ring_write(ring, 1);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(0));
+
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+
+ /* flush TLB */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 1 << vm->id);
+}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 7e9275eaef8..203d2a09a1f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -28,21 +28,375 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+/* DIDT IND registers */
+#define DIDT_SQ_CTRL0 0x0
+# define DIDT_CTRL_EN (1 << 0)
+#define DIDT_DB_CTRL0 0x20
+#define DIDT_TD_CTRL0 0x40
+#define DIDT_TCP_CTRL0 0x60
+
/* SMC IND registers */
+#define DPM_TABLE_475 0x3F768
+# define SamuBootLevel(x) ((x) << 0)
+# define SamuBootLevel_MASK 0x000000ff
+# define SamuBootLevel_SHIFT 0
+# define AcpBootLevel(x) ((x) << 8)
+# define AcpBootLevel_MASK 0x0000ff00
+# define AcpBootLevel_SHIFT 8
+# define VceBootLevel(x) ((x) << 16)
+# define VceBootLevel_MASK 0x00ff0000
+# define VceBootLevel_SHIFT 16
+# define UvdBootLevel(x) ((x) << 24)
+# define UvdBootLevel_MASK 0xff000000
+# define UvdBootLevel_SHIFT 24
+
+#define FIRMWARE_FLAGS 0x3F800
+# define INTERRUPTS_ENABLED (1 << 0)
+
+#define NB_DPM_CONFIG_1 0x3F9E8
+# define Dpm0PgNbPsLo(x) ((x) << 0)
+# define Dpm0PgNbPsLo_MASK 0x000000ff
+# define Dpm0PgNbPsLo_SHIFT 0
+# define Dpm0PgNbPsHi(x) ((x) << 8)
+# define Dpm0PgNbPsHi_MASK 0x0000ff00
+# define Dpm0PgNbPsHi_SHIFT 8
+# define DpmXNbPsLo(x) ((x) << 16)
+# define DpmXNbPsLo_MASK 0x00ff0000
+# define DpmXNbPsLo_SHIFT 16
+# define DpmXNbPsHi(x) ((x) << 24)
+# define DpmXNbPsHi_MASK 0xff000000
+# define DpmXNbPsHi_SHIFT 24
+
+#define SMC_SYSCON_RESET_CNTL 0x80000000
+# define RST_REG (1 << 0)
+#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
+# define CK_DISABLE (1 << 0)
+# define CKEN (1 << 24)
+
+#define SMC_SYSCON_MISC_CNTL 0x80000010
+
+#define SMC_SYSCON_MSG_ARG_0 0x80000068
+
+#define SMC_PC_C 0x80000370
+
+#define SMC_SCRATCH9 0x80000424
+
+#define RCU_UC_EVENTS 0xC0000004
+# define BOOT_SEQ_DONE (1 << 7)
+
#define GENERAL_PWRMGT 0xC0200000
+# define GLOBAL_PWRMGT_EN (1 << 0)
+# define STATIC_PM_EN (1 << 1)
+# define THERMAL_PROTECTION_DIS (1 << 2)
+# define THERMAL_PROTECTION_TYPE (1 << 3)
+# define SW_SMIO_INDEX(x) ((x) << 6)
+# define SW_SMIO_INDEX_MASK (1 << 6)
+# define SW_SMIO_INDEX_SHIFT 6
+# define VOLT_PWRMGT_EN (1 << 10)
# define GPU_COUNTER_CLK (1 << 15)
-
+# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
+
+#define CNB_PWRMGT_CNTL 0xC0200004
+# define GNB_SLOW_MODE(x) ((x) << 0)
+# define GNB_SLOW_MODE_MASK (3 << 0)
+# define GNB_SLOW_MODE_SHIFT 0
+# define GNB_SLOW (1 << 2)
+# define FORCE_NB_PS1 (1 << 3)
+# define DPM_ENABLED (1 << 4)
+
+#define SCLK_PWRMGT_CNTL 0xC0200008
+# define SCLK_PWRMGT_OFF (1 << 0)
+# define RESET_BUSY_CNT (1 << 4)
+# define RESET_SCLK_CNT (1 << 5)
+# define DYNAMIC_PM_EN (1 << 21)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014
+# define CURRENT_STATE_MASK (0xf << 4)
+# define CURRENT_STATE_SHIFT 4
+# define CURR_MCLK_INDEX_MASK (0xf << 8)
+# define CURR_MCLK_INDEX_SHIFT 8
+# define CURR_SCLK_INDEX_MASK (0x1f << 16)
+# define CURR_SCLK_INDEX_SHIFT 16
+
+#define CG_SSP 0xC0200044
+# define SST(x) ((x) << 0)
+# define SST_MASK (0xffff << 0)
+# define SSTU(x) ((x) << 16)
+# define SSTU_MASK (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL 0xC0200060
+# define DISP_GAP(x) ((x) << 0)
+# define DISP_GAP_MASK (3 << 0)
+# define VBI_TIMER_COUNT(x) ((x) << 4)
+# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
+# define VBI_TIMER_UNIT(x) ((x) << 20)
+# define VBI_TIMER_UNIT_MASK (7 << 20)
+# define DISP_GAP_MCHG(x) ((x) << 24)
+# define DISP_GAP_MCHG_MASK (3 << 24)
+
+#define SMU_VOLTAGE_STATUS 0xC0200094
+# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1)
+# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0
+# define CURR_PCIE_INDEX_MASK (0xf << 24)
+# define CURR_PCIE_INDEX_SHIFT 24
+
+#define CG_ULV_PARAMETER 0xC0200158
+
+#define CG_FTV_0 0xC02001A8
+#define CG_FTV_1 0xC02001AC
+#define CG_FTV_2 0xC02001B0
+#define CG_FTV_3 0xC02001B4
+#define CG_FTV_4 0xC02001B8
+#define CG_FTV_5 0xC02001BC
+#define CG_FTV_6 0xC02001C0
+#define CG_FTV_7 0xC02001C4
+
+#define CG_DISPLAY_GAP_CNTL2 0xC0200230
+
+#define LCAC_SX0_OVR_SEL 0xC0400D04
+#define LCAC_SX0_OVR_VAL 0xC0400D08
+
+#define LCAC_MC0_CNTL 0xC0400D30
+#define LCAC_MC0_OVR_SEL 0xC0400D34
+#define LCAC_MC0_OVR_VAL 0xC0400D38
+#define LCAC_MC1_CNTL 0xC0400D3C
+#define LCAC_MC1_OVR_SEL 0xC0400D40
+#define LCAC_MC1_OVR_VAL 0xC0400D44
+
+#define LCAC_MC2_OVR_SEL 0xC0400D4C
+#define LCAC_MC2_OVR_VAL 0xC0400D50
+
+#define LCAC_MC3_OVR_SEL 0xC0400D58
+#define LCAC_MC3_OVR_VAL 0xC0400D5C
+
+#define LCAC_CPL_CNTL 0xC0400D80
+#define LCAC_CPL_OVR_SEL 0xC0400D84
+#define LCAC_CPL_OVR_VAL 0xC0400D88
+
+/* dGPU */
+#define CG_THERMAL_CTRL 0xC0300004
+#define DPM_EVENT_SRC(x) ((x) << 0)
+#define DPM_EVENT_SRC_MASK (7 << 0)
+#define DIG_THERM_DPM(x) ((x) << 14)
+#define DIG_THERM_DPM_MASK 0x003FC000
+#define DIG_THERM_DPM_SHIFT 14
+
+#define CG_THERMAL_INT 0xC030000C
+#define CI_DIG_THERM_INTH(x) ((x) << 8)
+#define CI_DIG_THERM_INTH_MASK 0x0000FF00
+#define CI_DIG_THERM_INTH_SHIFT 8
+#define CI_DIG_THERM_INTL(x) ((x) << 16)
+#define CI_DIG_THERM_INTL_MASK 0x00FF0000
+#define CI_DIG_THERM_INTL_SHIFT 16
+#define THERM_INT_MASK_HIGH (1 << 24)
+#define THERM_INT_MASK_LOW (1 << 25)
+
+#define CG_MULT_THERMAL_STATUS 0xC0300014
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
+
+#define CG_SPLL_FUNC_CNTL 0xC0500140
+#define SPLL_RESET (1 << 0)
+#define SPLL_PWRON (1 << 1)
+#define SPLL_BYPASS_EN (1 << 3)
+#define SPLL_REF_DIV(x) ((x) << 5)
+#define SPLL_REF_DIV_MASK (0x3f << 5)
+#define SPLL_PDIV_A(x) ((x) << 20)
+#define SPLL_PDIV_A_MASK (0x7f << 20)
+#define SPLL_PDIV_A_SHIFT 20
+#define CG_SPLL_FUNC_CNTL_2 0xC0500144
+#define SCLK_MUX_SEL(x) ((x) << 0)
+#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define CG_SPLL_FUNC_CNTL_3 0xC0500148
+#define SPLL_FB_DIV(x) ((x) << 0)
+#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
+#define SPLL_FB_DIV_SHIFT 0
+#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_FUNC_CNTL_4 0xC050014C
+
+#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
+#define SSEN (1 << 0)
+#define CLK_S(x) ((x) << 4)
+#define CLK_S_MASK (0xfff << 4)
+#define CLK_S_SHIFT 4
+#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
+#define CLK_V(x) ((x) << 0)
+#define CLK_V_MASK (0x3ffffff << 0)
+#define CLK_V_SHIFT 0
+
+#define MPLL_BYPASSCLK_SEL 0xC050019C
+# define MPLL_CLKOUT_SEL(x) ((x) << 8)
+# define MPLL_CLKOUT_SEL_MASK 0xFF00
#define CG_CLKPIN_CNTL 0xC05001A0
# define XTALIN_DIVIDE (1 << 1)
-
+# define BCLK_AS_XCLK (1 << 2)
+#define CG_CLKPIN_CNTL_2 0xC05001A4
+# define FORCE_BIF_REFCLK_EN (1 << 3)
+# define MUX_TCLK_TO_XCLK (1 << 8)
+#define THM_CLK_CNTL 0xC05001A8
+# define CMON_CLK_SEL(x) ((x) << 0)
+# define CMON_CLK_SEL_MASK 0xFF
+# define TMON_CLK_SEL(x) ((x) << 8)
+# define TMON_CLK_SEL_MASK 0xFF00
+#define MISC_CLK_CTRL 0xC05001AC
+# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
+# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
+# define ZCLK_SEL(x) ((x) << 8)
+# define ZCLK_SEL_MASK 0xFF00
+
+/* KV/KB */
+#define CG_THERMAL_INT_CTRL 0xC2100028
+#define DIG_THERM_INTH(x) ((x) << 0)
+#define DIG_THERM_INTH_MASK 0x000000FF
+#define DIG_THERM_INTH_SHIFT 0
+#define DIG_THERM_INTL(x) ((x) << 8)
+#define DIG_THERM_INTL_MASK 0x0000FF00
+#define DIG_THERM_INTL_SHIFT 8
+#define THERM_INTH_MASK (1 << 24)
+#define THERM_INTL_MASK (1 << 25)
+
+/* PCIE registers idx/data 0x38/0x3c */
+#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PCIE_CNTL2 0x1001001c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+
+#define PCIE_LC_STATUS1 0x1400028 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x1400040 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
+#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */
+#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */
+
+#define PCIE_LC_CNTL 0x100100A0 /* PCIE */
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+
+#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
+/* direct registers */
#define PCIE_INDEX 0x38
#define PCIE_DATA 0x3C
+#define SMC_IND_INDEX_0 0x200
+#define SMC_IND_DATA_0 0x204
+
+#define SMC_IND_ACCESS_CNTL 0x240
+#define AUTO_INCREMENT_IND_0 (1 << 0)
+
+#define SMC_MESSAGE_0 0x250
+#define SMC_MSG_MASK 0xffff
+#define SMC_RESP_0 0x254
+#define SMC_RESP_MASK 0xffff
+
+#define SMC_MSG_ARG_0 0x290
+
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_GFX_CNTL 0xE44
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
@@ -172,6 +526,10 @@
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
+#define VM_L2_CG 0x15c0
+#define MC_CG_ENABLE (1 << 18)
+#define MC_LS_ENABLE (1 << 19)
+
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
@@ -201,6 +559,17 @@
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_HUB_MISC_HUB_CG 0x20b8
+#define MC_HUB_MISC_VM_CG 0x20bc
+
+#define MC_HUB_MISC_SIP_CG 0x20c0
+
+#define MC_XPB_CLK_GAT 0x2478
+
+#define MC_CITF_MISC_RD_CG 0x2648
+#define MC_CITF_MISC_WR_CG 0x264c
+#define MC_CITF_MISC_VM_CG 0x2650
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -215,9 +584,37 @@
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
+#define MC_ARB_DRAM_TIMING 0x2774
+#define MC_ARB_DRAM_TIMING2 0x2778
+
+#define MC_ARB_BURST_TIME 0x2808
+#define STATE0(x) ((x) << 0)
+#define STATE0_MASK (0x1f << 0)
+#define STATE0_SHIFT 0
+#define STATE1(x) ((x) << 5)
+#define STATE1_MASK (0x1f << 5)
+#define STATE1_SHIFT 5
+#define STATE2(x) ((x) << 10)
+#define STATE2_MASK (0x1f << 10)
+#define STATE2_SHIFT 10
+#define STATE3(x) ((x) << 15)
+#define STATE3_MASK (0x1f << 15)
+#define STATE3_SHIFT 15
+
+#define MC_SEQ_RAS_TIMING 0x28a0
+#define MC_SEQ_CAS_TIMING 0x28a4
+#define MC_SEQ_MISC_TIMING 0x28a8
+#define MC_SEQ_MISC_TIMING2 0x28ac
+#define MC_SEQ_PMG_TIMING 0x28b0
+#define MC_SEQ_RD_CTL_D0 0x28b4
+#define MC_SEQ_RD_CTL_D1 0x28b8
+#define MC_SEQ_WR_CTL_D0 0x28bc
+#define MC_SEQ_WR_CTL_D1 0x28c0
+
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_PMG_AUTO_CMD 0x28d0
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
@@ -226,10 +623,92 @@
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
+#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
+#define MC_SEQ_MISC0_VEN_ID_VALUE 3
+#define MC_SEQ_MISC0_REV_ID_SHIFT 12
+#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
+#define MC_SEQ_MISC0_REV_ID_VALUE 1
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_MISC1 0x2a04
+#define MC_SEQ_RESERVE_M 0x2a08
+#define MC_PMG_CMD_EMRS 0x2a0c
+
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define MC_SEQ_MISC5 0x2a54
+#define MC_SEQ_MISC6 0x2a58
+
+#define MC_SEQ_MISC7 0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP 0x2a6c
+#define MC_SEQ_CAS_TIMING_LP 0x2a70
+#define MC_SEQ_MISC_TIMING_LP 0x2a74
+#define MC_SEQ_MISC_TIMING2_LP 0x2a78
+#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP 0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
+
+#define MC_PMG_CMD_MRS 0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP 0x2b20
+
+#define MC_PMG_CMD_MRS1 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
+#define MC_SEQ_PMG_TIMING_LP 0x2b4c
+
+#define MC_SEQ_WR_CTL_2 0x2b54
+#define MC_SEQ_WR_CTL_2_LP 0x2b58
+#define MC_PMG_CMD_MRS2 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
+
+#define MCLK_PWRMGT_CNTL 0x2ba0
+# define DLL_SPEED(x) ((x) << 0)
+# define DLL_SPEED_MASK (0x1f << 0)
+# define DLL_READY (1 << 6)
+# define MC_INT_CNTL (1 << 7)
+# define MRDCK0_PDNB (1 << 8)
+# define MRDCK1_PDNB (1 << 9)
+# define MRDCK0_RESET (1 << 16)
+# define MRDCK1_RESET (1 << 17)
+# define DLL_READY_READ (1 << 24)
+#define DLL_CNTL 0x2ba4
+# define MRDCK0_BYPASS (1 << 24)
+# define MRDCK1_BYPASS (1 << 25)
+
+#define MPLL_FUNC_CNTL 0x2bb4
+#define BWCTRL(x) ((x) << 20)
+#define BWCTRL_MASK (0xff << 20)
+#define MPLL_FUNC_CNTL_1 0x2bb8
+#define VCO_MODE(x) ((x) << 0)
+#define VCO_MODE_MASK (3 << 0)
+#define CLKFRAC(x) ((x) << 4)
+#define CLKFRAC_MASK (0xfff << 4)
+#define CLKF(x) ((x) << 16)
+#define CLKF_MASK (0xfff << 16)
+#define MPLL_FUNC_CNTL_2 0x2bbc
+#define MPLL_AD_FUNC_CNTL 0x2bc0
+#define YCLK_POST_DIV(x) ((x) << 0)
+#define YCLK_POST_DIV_MASK (7 << 0)
+#define MPLL_DQ_FUNC_CNTL 0x2bc4
+#define YCLK_SEL(x) ((x) << 4)
+#define YCLK_SEL_MASK (1 << 4)
+
+#define MPLL_SS1 0x2bcc
+#define CLKV(x) ((x) << 0)
+#define CLKV_MASK (0x3ffffff << 0)
+#define MPLL_SS2 0x2bd0
+#define CLKS(x) ((x) << 0)
+#define CLKS_MASK (0xfff << 0)
+
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -237,6 +716,26 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
+
+#define ATC_MISC_CG 0x3350
+
+#define MC_SEQ_CNTL_3 0x3600
+# define CAC_EN (1 << 31)
+#define MC_SEQ_G5PDX_CTRL 0x3604
+#define MC_SEQ_G5PDX_CTRL_LP 0x3608
+#define MC_SEQ_G5PDX_CMD0 0x360c
+#define MC_SEQ_G5PDX_CMD0_LP 0x3610
+#define MC_SEQ_G5PDX_CMD1 0x3614
+#define MC_SEQ_G5PDX_CMD1_LP 0x3618
+
+#define MC_SEQ_PMG_DVS_CTL 0x3628
+#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
+#define MC_SEQ_PMG_DVS_CMD 0x3630
+#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
+#define MC_SEQ_DLL_STBY 0x3638
+#define MC_SEQ_DLL_STBY_LP 0x363c
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
@@ -265,6 +764,9 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
+#define BIF_LNCNT_RESET 0x5220
+# define RESET_LNCNT_EN (1 << 0)
+
#define CONFIG_MEMSIZE 0x5428
#define INTERRUPT_CNTL 0x5468
@@ -401,6 +903,9 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
+# define STUTTER_ENABLE (1 << 0)
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -504,6 +1009,9 @@
#define CP_RB0_RPTR 0x8700
#define CP_RB_WPTR_DELAY 0x8704
+#define CP_RB_WPTR_POLL_CNTL 0x8708
+#define IDLE_POLL_COUNT(x) ((x) << 16)
+#define IDLE_POLL_COUNT_MASK (0xffff << 16)
#define CP_MEQ_THRESHOLDS 0x8764
#define MEQ1_START(x) ((x) << 0)
@@ -730,6 +1238,9 @@
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
+#define CP_MEM_SLP_CNTL 0xC1E4
+# define CP_MEM_LS_EN (1 << 0)
+
#define CP_CPF_DEBUG 0xC200
#define CP_PQ_WPTR_POLL_CNTL 0xC20C
@@ -775,14 +1286,20 @@
#define RLC_MC_CNTL 0xC30C
+#define RLC_MEM_SLP_CNTL 0xC318
+# define RLC_MEM_LS_EN (1 << 0)
+
#define RLC_LB_CNTR_MAX 0xC348
#define RLC_LB_CNTL 0xC364
+# define LOAD_BALANCE_ENABLE (1 << 0)
#define RLC_LB_CNTR_INIT 0xC36C
#define RLC_SAVE_AND_RESTORE_BASE 0xC374
-#define RLC_DRIVER_DMA_STATUS 0xC378
+#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */
+#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */
+#define RLC_PG_DELAY_2 0xC37C
#define RLC_GPM_UCODE_ADDR 0xC388
#define RLC_GPM_UCODE_DATA 0xC38C
@@ -791,12 +1308,52 @@
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
#define RLC_UCODE_CNTL 0xC39C
+#define RLC_GPM_STAT 0xC400
+# define RLC_GPM_BUSY (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+
+#define RLC_PG_CNTL 0xC40C
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+# define DYN_PER_CU_PG_ENABLE (1 << 2)
+# define STATIC_PER_CU_PG_ENABLE (1 << 3)
+# define DISABLE_GDS_PG (1 << 13)
+# define DISABLE_CP_PG (1 << 15)
+# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17)
+# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 18)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0xC420
#define RLC_CGCG_CGLS_CTRL 0xC424
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_PG_DELAY 0xC434
#define RLC_LB_INIT_CU_MASK 0xC43C
#define RLC_LB_PARAMS 0xC444
+#define RLC_PG_AO_CU_MASK 0xC44C
+
+#define RLC_MAX_PG_CU 0xC450
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0xC454
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+
+#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474
+#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478
+#define RLC_SERDES_WR_CTRL 0xC47C
+#define BPM_ADDR(x) ((x) << 0)
+#define BPM_ADDR_MASK (0xff << 0)
+#define CGLS_ENABLE (1 << 16)
+#define CGCG_OVERRIDE_0 (1 << 20)
+#define MGCG_OVERRIDE_0 (1 << 22)
+#define MGCG_OVERRIDE_1 (1 << 23)
+
#define RLC_SERDES_CU_MASTER_BUSY 0xC484
#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
# define SE_MASTER_BUSY_MASK 0x0000ffff
@@ -807,6 +1364,13 @@
#define RLC_GPM_SCRATCH_ADDR 0xC4B0
#define RLC_GPM_SCRATCH_DATA 0xC4B4
+#define RLC_GPR_REG2 0xC4E8
+#define REQ 0x00000001
+#define MESSAGE(x) ((x) << 1)
+#define MESSAGE_MASK 0x0000001e
+#define MSG_ENTER_RLC_SAFE_MODE 1
+#define MSG_EXIT_RLC_SAFE_MODE 0
+
#define CP_HPD_EOP_BASE_ADDR 0xC904
#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
#define CP_HPD_EOP_VMID 0xC90C
@@ -851,6 +1415,8 @@
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
+#define DB_RENDER_CONTROL 0x28000
+
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
@@ -944,6 +1510,16 @@
#define CP_PERFMON_CNTL 0x36020
+#define CGTS_SM_CTRL_REG 0x3c000
+#define SM_MODE(x) ((x) << 17)
+#define SM_MODE_MASK (0x7 << 17)
+#define SM_MODE_ENABLE (1 << 20)
+#define CGTS_OVERRIDE (1 << 21)
+#define CGTS_LS_OVERRIDE (1 << 22)
+#define ON_MONITOR_ADD_EN (1 << 23)
+#define ON_MONITOR_ADD(x) ((x) << 24)
+#define ON_MONITOR_ADD_MASK (0xff << 24)
+
#define CGTS_TCC_DISABLE 0x3c00c
#define CGTS_USER_TCC_DISABLE 0x3c010
#define TCC_DISABLE_MASK 0xFFFF0000
@@ -1176,6 +1752,8 @@
#define SDMA0_UCODE_ADDR 0xD000
#define SDMA0_UCODE_DATA 0xD004
+#define SDMA0_POWER_CNTL 0xD008
+#define SDMA0_CLK_CTRL 0xD00C
#define SDMA0_CNTL 0xD010
# define TRAP_ENABLE (1 << 0)
@@ -1300,6 +1878,13 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CGC_CTRL 0xF4B0
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
/* UVD clocks */
#define CG_DCLK_CNTL 0xC050009C
@@ -1310,4 +1895,7 @@
#define CG_VCLK_CNTL 0xC05000A4
#define CG_VCLK_STATUS 0xC05000A8
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+
#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index c00339440c5..aa908c55a51 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def cayman_cs_data[] = {
+static const struct cs_section_def cayman_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 00000000000..c3982f9475f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+ 0, // HOLE
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+ {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+ {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+ { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
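
How these tables are consumed: each cs_extent_def entry pairs an array of default register values with the dword offset of the first register and the number of dwords covered, and the 0, // HOLE entries keep the array index-aligned with registers that are deliberately left unprogrammed. The RLC init code sizes the clear-state payload by walking the sections, roughly like this sketch (struct layouts follow the definitions used above; the helper name is illustrative):

    /* Sketch: total register-payload dwords for a cs_section_def table,
     * mirroring the accounting done in sumo_rlc_init(). */
    static unsigned int cs_data_dwords(const struct cs_section_def *cs_data)
    {
            unsigned int i, j, dws = 0;

            for (i = 0; cs_data[i].section != NULL; i++)
                    for (j = 0; cs_data[i].section[j].extent != NULL; j++)
                            dws += cs_data[i].section[j].reg_count;
            return dws;
    }

For ci_SECT_CONTEXT_defs above this sums 212 + 274 + 6 + 157 + 2 + 1 + 233 = 885 dwords of payload.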
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 4791d856b7f..63a1ffbb3ce 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def evergreen_cs_data[] = {
+static const struct cs_section_def evergreen_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 7e5d0b570a3..95a66db08d9 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2166,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
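
Worked consequence of the one-line change: both operands are u32, so for non-GDDR5 boards the comparison degenerates to an always-false test and the vblank heuristic now only ever triggers on GDDR5 parts, matching the new comment.

    u32 vblank_time;        /* unsigned, always >= 0 */
    u32 switch_limit = 0;   /* non-GDDR5 value after this patch */
    /* vblank_time < switch_limit  =>  vblank_time < 0  =>  false for any u32 */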
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 00000000000..8953255e894
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "sid.h"
+
+static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg)
+{
+ u32 r;
+
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ return r;
+}
+
+static void dce6_endpoint_wreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ if (ASIC_IS_DCE8(rdev))
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ else
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
+ AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
+ WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+}
+
+#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
+#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
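
These two macros wrap the usual index/data indirect-register pattern: the register number is latched through AZ_F0_CODEC_ENDPOINT_INDEX and the payload moves through AZ_F0_CODEC_ENDPOINT_DATA, with pre-DCE8 parts additionally requiring AZ_ENDPOINT_REG_WRITE_EN in the index word on writes. The read-modify-write idiom used throughout the rest of this file then looks like the following sketch (SOME_ENDPOINT_REG, SOME_FIELD_MASK and SOME_FIELD() are placeholders, not real registers):

    u32 tmp;

    tmp = RREG32_ENDPOINT(offset, SOME_ENDPOINT_REG);  /* latch index, read data */
    tmp &= ~SOME_FIELD_MASK;                           /* clear the field */
    tmp |= SOME_FIELD(new_value);                      /* merge the new value */
    WREG32_ENDPOINT(offset, SOME_ENDPOINT_REG, tmp);   /* latch index, write data */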
+
+
+static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ offset = rdev->audio.pin[i].offset;
+ tmp = RREG32_ENDPOINT(offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ rdev->audio.pin[i].connected = false;
+ else
+ rdev->audio.pin[i].connected = true;
+ }
+}
+
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
+{
+ int i;
+
+ dce6_afmt_get_connected_pins(rdev);
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ if (rdev->audio.pin[i].connected)
+ return &rdev->audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+void dce6_afmt_select_pin(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset, id;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->offset;
+ id = dig->afmt->pin->id;
+
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+}
+
+void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 offset, tmp;
+ u8 *sadb = NULL;
+ int sad_count;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count <= 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
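
One iteration of the loop above, written out for the PCM descriptor with a matching SAD found (the register macros are the ones used in this file; the SAD fields come straight from drm_edid_to_sad()):

    value = MAX_CHANNELS(sad->channels) |
            DESCRIPTOR_BYTE_2(sad->byte2) |
            SUPPORTED_FREQUENCIES(sad->freq);
    value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);  /* PCM only */
    WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, value);

Descriptors with no matching SAD are still written, with value 0, which clears any stale programming.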
+
+static int dce6_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return !ASIC_IS_NODCE(rdev);
+}
+
+static void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
+{
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
+ enable ? AUDIO_ENABLED : 0);
+ DRM_INFO("%s audio pin %d\n", enable ? "Enabling" : "Disabling", pin->id);
+}
+
+static const u32 pin_offsets[7] =
+{
+ (0x5e00 - 0x5e00),
+ (0x5e18 - 0x5e00),
+ (0x5e30 - 0x5e00),
+ (0x5e48 - 0x5e00),
+ (0x5e60 - 0x5e00),
+ (0x5e78 - 0x5e00),
+ (0x5e90 - 0x5e00),
+};
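
The table encodes the seven audio-pin register blocks as offsets from the 0x5e00 base; each block is 0x18 bytes wide, so the entries collapse to a simple stride (equivalent closed form, sketch only):

    static inline u32 dce6_pin_offset(unsigned int i)
    {
            return i * 0x18;        /* (0x5e18 - 0x5e00) == 0x18 */
    }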
+
+int dce6_audio_init(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
+ return 0;
+
+ rdev->audio.enabled = true;
+
+ if (ASIC_IS_DCE8(rdev))
+ rdev->audio.num_pins = 7;
+ else
+ rdev->audio.num_pins = 6;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ rdev->audio.pin[i].channels = -1;
+ rdev->audio.pin[i].rate = -1;
+ rdev->audio.pin[i].bits_per_sample = -1;
+ rdev->audio.pin[i].status_bits = 0;
+ rdev->audio.pin[i].category_code = 0;
+ rdev->audio.pin[i].connected = false;
+ rdev->audio.pin[i].offset = pin_offsets[i];
+ rdev->audio.pin[i].id = i;
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ }
+
+ return 0;
+}
+
+void dce6_audio_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!rdev->audio.enabled)
+ return;
+
+ for (i = 0; i < rdev->audio.num_pins; i++)
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
+
+ rdev->audio.enabled = false;
+}
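
Taken together the new file gives radeon a small pin lifecycle: dce6_audio_init() enables every pin up front (7 on DCE8, 6 on DCE6), dce6_audio_get_pin() refreshes connectivity and returns the first connected pin, and dce6_audio_fini() disables them again. A hypothetical caller in encoder setup code would look roughly like:

    struct r600_audio_pin *pin = dce6_audio_get_pin(rdev);

    if (pin) {
            dig->afmt->pin = pin;
            dce6_afmt_select_pin(encoder);               /* route AFMT to the pin */
            dce6_afmt_write_sad_regs(encoder);           /* program the audio descriptors */
            dce6_afmt_write_speaker_allocation(encoder);
    }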
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d5b49e33315..555164e270a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] =
#include "clearstate_evergreen.h"
-static u32 sumo_rlc_save_restore_register_list[] =
+static const u32 sumo_rlc_save_restore_register_list[] =
{
0x98fc,
0x9830,
@@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] =
0x9150,
0x802c,
};
-static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -141,6 +140,12 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
extern void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr);
+void cik_init_cp_pg_table(struct radeon_device *rdev);
+
+extern u32 si_get_csb_size(struct radeon_device *rdev);
+extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern u32 cik_get_csb_size(struct radeon_device *rdev);
+extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
static const u32 evergreen_golden_registers[] =
{
@@ -1807,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1830,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -2881,8 +2903,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
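
drm_order() and order_base_2() share the same ceiling-log2 semantics (smallest n with 2^n >= x), so this is a drop-in switch to the generic kernel helper; a worked example for a 1 MiB ring:

    /* rb_bufsz = order_base_2(1048576 / 8) = order_base_2(131072) = 17
     * RADEON_GPU_PAGE_SIZE is 4096, so the page-size field is
     *   order_base_2(4096 / 8) << 8 = 9 << 8
     */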
@@ -3613,7 +3635,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3839,28 +3861,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * evergreen_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/*
* RLC
*/
@@ -3894,147 +3894,231 @@ void sumo_rlc_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
+
+ /* cp table block */
+ if (rdev->rlc.cp_table_obj) {
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ radeon_bo_unref(&rdev->rlc.cp_table_obj);
+ rdev->rlc.cp_table_obj = NULL;
+ }
}
+#define CP_ME_TABLE_SIZE 96
+
int sumo_rlc_init(struct radeon_device *rdev)
{
- u32 *src_ptr;
+ const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
+ u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
u64 reg_list_mc_addr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
int r;
src_ptr = rdev->rlc.reg_list;
dws = rdev->rlc.reg_list_size;
+ if (rdev->family >= CHIP_BONAIRE) {
+ dws += (5 * 16) + 48 + 48 + 64;
+ }
cs_data = rdev->rlc.cs_data;
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (src_ptr) {
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.save_restore_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
+ r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = rdev->rlc.sr_ptr;
+ if (rdev->family >= CHIP_TAHITI) {
+ /* SI */
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ dst_ptr[i] = src_ptr[i];
+ } else {
+ /* ON/LN/TN */
+ /* format:
+ * dw0: (reg2 << 16) | reg1
+ * dw1: reg1 save space
+ * dw2: reg2 save space
+ */
+ for (i = 0; i < dws; i++) {
+ data = src_ptr[i] >> 2;
+ i++;
+ if (i < dws)
+ data |= (src_ptr[i] >> 2) << 16;
+ j = (((i - 1) * 3) / 2);
+ dst_ptr[j] = data;
+ }
+ j = ((i * 3) / 2);
+ dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+ }
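
Worked example of the ON/LN/TN packing above, using the first two entries of sumo_rlc_save_restore_register_list (0x98fc and 0x9830) with dws == 2:

    /* i == 0: data  =  0x98fc >> 2;
     * i == 1: data |= (0x9830 >> 2) << 16;
     *          j = ((1 - 1) * 3) / 2 = 0   ->  dst_ptr[0] = packed pair
     * dst_ptr[1] and dst_ptr[2] are the save space for the two registers,
     * and after the loop j = (2 * 3) / 2 = 3  ->  dst_ptr[3] = end marker,
     * which is where the 3-dwords-per-register-pair stride comes from.
     */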
+ radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
}
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- /* format:
- * dw0: (reg2 << 16) | reg1
- * dw1: reg1 save space
- * dw2: reg2 save space
- */
- for (i = 0; i < dws; i++) {
- data = src_ptr[i] >> 2;
- i++;
- if (i < dws)
- data |= (src_ptr[i] >> 2) << 16;
- j = (((i - 1) * 3) / 2);
- dst_ptr[j] = data;
- }
- j = ((i * 3) / 2);
- dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
-
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (cs_data) {
+ /* clear state block */
+ if (rdev->family >= CHIP_BONAIRE) {
+ rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ rdev->rlc.clear_state_size = si_get_csb_size(rdev);
+ dws = rdev->rlc.clear_state_size + (256 / 4);
+ } else {
+ reg_list_num = 0;
+ dws = 0;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_list_num++;
+ dws += cs_data[i].section[j].reg_count;
+ }
+ }
+ reg_list_blk_index = (3 * reg_list_num + 2);
+ dws += reg_list_blk_index;
+ rdev->rlc.clear_state_size = dws;
}
- }
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (rdev->rlc.clear_state_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.clear_state_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
-
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
+ r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = rdev->rlc.cs_ptr;
+ if (rdev->family >= CHIP_BONAIRE) {
+ cik_get_csb_buffer(rdev, dst_ptr);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
+ dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
+ dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
+ dst_ptr[2] = rdev->rlc.clear_state_size;
+ si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
+ } else {
+ reg_list_hdr_blk_index = 0;
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
+ data = upper_32_bits(reg_list_mc_addr);
dst_ptr[reg_list_hdr_blk_index] = data;
reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_num = cs_data[i].section[j].reg_count;
+ data = reg_list_mc_addr & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = 0x08000000 | (reg_num * 4);
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ for (k = 0; k < reg_num; k++) {
+ data = cs_data[i].section[j].extent[k];
+ dst_ptr[reg_list_blk_index + k] = data;
+ }
+ reg_list_mc_addr += reg_num * 4;
+ reg_list_blk_index += reg_num;
+ }
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
+ dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
}
+ radeon_bo_kunmap(rdev->rlc.clear_state_obj);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (rdev->rlc.cp_table_size) {
+ if (rdev->rlc.cp_table_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.cp_table_gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+
+ cik_init_cp_pg_table(rdev);
+
+ radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ }
return 0;
}
@@ -4959,143 +5043,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (evergreen-SI).
- */
-void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-}
-
-/**
- * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (evergreen).
- */
-void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * evergreen_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (evergreen-cayman).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFFF)
- cur_size_in_dw = 0xFFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -5106,6 +5053,11 @@ static int evergreen_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
evergreen_mc_program(rdev);
if (ASIC_IS_DCE5(rdev)) {
@@ -5131,10 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5144,17 +5092,11 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
rdev->rlc.cs_data = evergreen_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -5180,7 +5122,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -5209,14 +5151,14 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
@@ -5232,12 +5174,11 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -5292,7 +5233,7 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -5420,7 +5361,6 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -5430,7 +5370,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
deleted file mode 100644
index 057c87b6515..00000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "evergreend.h"
-#include "evergreen_blit_shaders.h"
-#include "cayman_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* emits 17 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, pitch);
- radeon_ring_write(ring, slice);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- if (rdev->family >= CHIP_CAYMAN) {
- /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
- * to the RB directly. For IBs, the CP programs this as part of the
- * surface_sync packet.
- */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
- }
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 11dw + 1 surface sync = 16dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 1);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 2);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 10 + 1 sync (5) = 15 */
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
-
- /* high addr, stride */
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
- /* xyzw swizzles */
- sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
- SQ_VTCX_SEL_Y(SQ_SEL_Y) |
- SQ_VTCX_SEL_Z(SQ_SEL_Z) |
- SQ_VTCX_SEL_W(SQ_SEL_W);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0x580);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1); /* size */
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, sq_vtx_constant_word3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-
-}
-
-/* emits 10 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_tex_resource_word0, sq_tex_resource_word1;
- u32 sq_tex_resource_word4, sq_tex_resource_word7;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
- sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
- ((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) |
- TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- /* xyzw swizzles */
- sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
- TEX_DST_SEL_Y(SQ_SEL_Y) |
- TEX_DST_SEL_Z(SQ_SEL_Z) |
- TEX_DST_SEL_W(SQ_SEL_W);
-
- sq_tex_resource_word7 = format |
- S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word7);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- /* work around some hw bugs */
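- /* a zero x2/y2 forces the matching start coordinate to 1; cayman+ additionally avoids a 1x1 scissor */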
- if (x2 == 0)
- x1 = 1;
- if (y2 == 0)
- y1 = 1;
- if (rdev->family >= CHIP_CAYMAN) {
- if ((x2 == 1) && (y2 == 1))
- x2 = 2;
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 39 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
- u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
- u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs;
- int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_hs_threads, num_ls_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- int num_hs_stack_entries, num_ls_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- /* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
-
- if (rdev->family < CHIP_CAYMAN) {
- switch (rdev->family) {
- case CHIP_CEDAR:
- default:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_REDWOOD:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_JUNIPER:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_PALM:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO2:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_BARTS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_TURKS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_CAICOS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 10;
- num_gs_threads = 10;
- num_es_threads = 10;
- num_hs_threads = 10;
- num_ls_threads = 10;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- }
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (EXPORT_SRC_C |
- CS_PRIO(0) |
- LS_PRIO(0) |
- HS_PRIO(0) |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
- NUM_LS_GPRS(num_ls_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
- NUM_LS_THREADS(num_ls_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
- sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
- NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
-
- /* disable dyn gprs */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
-
- /* setup LDS */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0x10001000);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_thread_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_3);
- }
-
- /* CONTEXT_CONTROL */
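- /* 0xc0012800 == PACKET3(PACKET3_CONTEXT_CONTROL, 1): type 3 in bits 31:30, count in bits 29:16, opcode 0x28 in bits 15:8 */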
- radeon_ring_write(ring, 0xc0012800);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
-
- /* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* SET_SAMPLER */
- radeon_ring_write(ring, 0xc0036e00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000012);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
-}
-
-int evergreen_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 74;
- if (rdev->family >= CHIP_CAYMAN)
- rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
-
- rdev->r600_blit.max_dim = 16384;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family < CHIP_CAYMAN)
- rdev->r600_blit.state_len = evergreen_default_size;
- else
- rdev->r600_blit.state_len = cayman_default_size;
-
- dwords = rdev->r600_blit.state_len;
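- /* pad the default state with type-2 NOP packets out to a 16-dword boundary */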
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_vs_size * 4;
- else
- obj_size += cayman_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_ps_size * 4;
- else
- obj_size += cayman_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (!rdev->r600_blit.shader_obj) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("evergreen failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
-
- if (rdev->family < CHIP_CAYMAN) {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- evergreen_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < evergreen_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
- for (i = 0; i < evergreen_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
- } else {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- cayman_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < cayman_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
- for (i = 0; i < cayman_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
- }
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index f85c0af115b..d43383470cd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -300,58 +300,4 @@ const u32 evergreen_default_state[] =
0x00000010, /* */
};
-const u32 evergreen_vs[] =
-{
- 0x00000004,
- 0x80800400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15200688,
- 0x00000000,
- 0x00000000,
- 0x3c000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
- 0x00080000,
-#endif
- 0x00000000,
- 0x1c000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 evergreen_ps[] =
-{
- 0x00000003,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95200688,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000000,
- 0x00000000,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
-const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 00000000000..6a0656d00ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
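+ /* predict the rptr value after this 4-dw write packet, the NOP padding below, and the 3-dw IB packet */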
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
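+ /* if the previous fence came from another ring, wait on that ring before starting the copy */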
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
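+ /* a single COPY packet moves at most 0xfffff dwords, so large copies are split into chunks */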
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activity */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b0e280058b9..f71ce390aeb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -32,6 +32,10 @@
#include "evergreend.h"
#include "atom.h"
+extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
+extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
+extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -54,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
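+ /* find the connector driven by this encoder so its EDID can be read */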
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -157,22 +200,26 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
if (!dig || !dig->afmt)
return;
- if (max_ratio >= 8) {
- dto_phase = 192 * 1000;
- wallclock_ratio = 3;
- } else if (max_ratio >= 4) {
- dto_phase = 96 * 1000;
- wallclock_ratio = 2;
- } else if (max_ratio >= 2) {
- dto_phase = 48 * 1000;
- wallclock_ratio = 1;
- } else {
+ if (ASIC_IS_DCE6(rdev)) {
dto_phase = 24 * 1000;
- wallclock_ratio = 0;
+ } else {
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
}
- dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
- WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
@@ -260,13 +307,23 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
AFMT_60958_CS_CHANNEL_NUMBER_7(8));
- /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_write_speaker_allocation(encoder);
+ } else {
+ dce4_afmt_write_speaker_allocation(encoder);
+ }
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
/* fglrx sets 0x40 in 0x5f80 here */
- evergreen_hdmi_write_sad_regs(encoder);
+
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_select_pin(encoder);
+ dce6_afmt_write_sad_regs(encoder);
+ } else {
+ evergreen_hdmi_write_sad_regs(encoder);
+ }
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -302,6 +359,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -314,6 +373,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable) {
+ if (ASIC_IS_DCE6(rdev))
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ } else {
+ dig->afmt->pin = NULL;
+ }
+
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0d582ac1dc3..8768fd6a1e2 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -714,6 +714,13 @@
#define AFMT_GENERIC0_7 0x7138
/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
@@ -1153,6 +1160,10 @@
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 00000000000..ecd60809db4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2645 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "kv_dpm.h"
+#include "radeon_asic.h"
+#include <linux/seq_file.h>
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define KV_MINIMUM_ENGINE_CLOCK 800
+#define SMC_RAM_END 0x40000
+
+static void kv_init_graphics_levels(struct radeon_device *rdev);
+static int kv_calculate_ds_divider(struct radeon_device *rdev);
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
+static int kv_calculate_dpm_settings(struct radeon_device *rdev);
+static void kv_enable_new_levels(struct radeon_device *rdev);
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps);
+static int kv_set_enabled_levels(struct radeon_device *rdev);
+static int kv_force_dpm_highest(struct radeon_device *rdev);
+static int kv_force_dpm_lowest(struct radeon_device *rdev);
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp);
+static int kv_init_fps_limits(struct radeon_device *rdev);
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
+
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 2 },
+ { 4, 1, 1 },
+ { 5, 5, 2 },
+ { 6, 6, 1 },
+ { 7, 9, 2 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 1 },
+ { 4, 1, 1 },
+ { 5, 5, 1 },
+ { 6, 6, 1 },
+ { 7, 9, 1 },
+ { 8, 4, 1 },
+ { 9, 2, 1 },
+ { 10, 3, 1 },
+ { 11, 6, 1 },
+ { 12, 8, 2 },
+ { 13, 1, 1 },
+ { 14, 2, 1 },
+ { 15, 3, 1 },
+ { 16, 1, 1 },
+ { 17, 4, 1 },
+ { 18, 3, 1 },
+ { 19, 1, 1 },
+ { 20, 8, 1 },
+ { 21, 5, 1 },
+ { 22, 1, 1 },
+ { 23, 1, 1 },
+ { 24, 4, 1 },
+ { 27, 6, 1 },
+ { 28, 1, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+ { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+ { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+ { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+ { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+ { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+ { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
+{
+ struct kv_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct radeon_device *rdev,
+ const struct kv_lcac_config_values *local_cac_table,
+ const struct kv_lcac_config_reg *local_cac_reg)
+{
+ u32 i, count, data;
+ const struct kv_lcac_config_values *values = local_cac_table;
+
+ while (values->block_id != 0xffffffff) {
+ count = values->signal_id;
+ for (i = 0; i < count; i++) {
+ data = ((values->block_id << local_cac_reg->block_shift) &
+ local_cac_reg->block_mask);
+ data |= ((i << local_cac_reg->signal_shift) &
+ local_cac_reg->signal_mask);
+ data |= ((values->t << local_cac_reg->t_shift) &
+ local_cac_reg->t_mask);
+ data |= ((1 << local_cac_reg->enable_shift) &
+ local_cac_reg->enable_mask);
+ WREG32_SMC(local_cac_reg->cntl, data);
+ }
+ values++;
+ }
+}
+#endif
+
+static int kv_program_pt_config_registers(struct radeon_device *rdev,
+ const struct kv_pt_config_reg *cac_config_regs)
+{
+ const struct kv_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
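+ /* CACHE-type entries only accumulate field bits; they are OR'd into the next register that is actually written */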
+ if (config_regs->type == KV_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+ cache = 0;
+
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ }
+ config_regs++;
+ }
+
+ return 0;
+}
+
+static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
+
+static int kv_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping ||
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(rdev, didt_config_kv);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->caps_cac) {
+ WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+ WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
+ WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+ }
+}
+#endif
+
+static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
+ if (ret)
+ pi->cac_enabled = false;
+ else
+ pi->cac_enabled = true;
+ } else if (pi->cac_enabled) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_process_firmware_header(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->dpm_table_start = tmp;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->soft_regs_start = tmp;
+
+ return ret;
+}
+
+static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_voltage_change_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+ &pi->graphics_voltage_change_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_interval(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+ &pi->graphics_interval,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+ &pi->graphics_boot_level,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static void kv_program_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+}
+
+static void kv_clear_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0);
+}
+
+static int kv_set_divider_value(struct radeon_device *rdev,
+ u32 index, u32 sclk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
+ if (ret)
+ return ret;
+
+ pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+ pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+ return 0;
+}
+
+static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
+ u16 voltage)
+{
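+ /* linear VID mapping: vid 0 is the 6200 ceiling and each step subtracts 25 */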
+ return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
+ u32 vid_2bit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
+
+ return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
+}
+
+static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+ pi->graphics_level[index].MinVddNb =
+ cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
+
+ return 0;
+}
+
+static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+ return 0;
+}
+
+static void kv_dpm_power_level_enable(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ kv_smc_dpm_enable(rdev, true);
+}
+
+static void kv_stop_dpm(struct radeon_device *rdev)
+{
+ kv_smc_dpm_enable(rdev, false);
+}
+
+static void kv_start_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
+{
+ return kv_notify_message_to_smu(rdev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct radeon_device *rdev)
+{
+ return kv_force_dpm_lowest(rdev);
+}
+
+static int kv_unforce_levels(struct radeon_device *rdev)
+{
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+}
+
+static int kv_update_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 low_sclk_interrupt_t = 0;
+ int ret = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+ }
+ return ret;
+}
+
+static int kv_program_bootup_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
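+ /* scan from the highest level for the one matching the boot sclk, falling back to level 0 */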
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ if (table->num_max_dpm_entries == 0)
+ return -EINVAL;
+
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+ return 0;
+}
+
+static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_therm_throttle_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+ &pi->graphics_therm_throttle_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_upload_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+ (u8 *)&pi->graphics_level,
+ sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+ pi->sram_end);
+
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
+ &pi->graphics_dpm_level_count,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static u32 kv_get_clock_difference(u32 a, u32 b)
+{
+ return (a >= b) ? a - b : b - a;
+}
+
+static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 value;
+
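+ /* clocks are in 10 kHz units; use a bypass divider when clk sits within 200 (2 MHz) of a known bypass frequency */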
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(clk, 40000) < 200)
+ value = 3;
+ else if (kv_get_clock_difference(clk, 30000) < 200)
+ value = 2;
+ else if (kv_get_clock_difference(clk, 20000) < 200)
+ value = 7;
+ else if (kv_get_clock_difference(clk, 15000) < 200)
+ value = 6;
+ else if (kv_get_clock_difference(clk, 10000) < 200)
+ value = 8;
+ else
+ value = 0;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static int kv_populate_uvd_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->uvd_level_count = 0;
+ for (i = 0; i < table->count; i++) {
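+ /* stop at the first entry above the high-voltage threshold; it and later entries are left out of the SMU table */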
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t < table->entries[i].v))
+ break;
+
+ pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
+ pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
+ pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
+
+ pi->uvd_level[i].VClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
+ pi->uvd_level[i].DClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].vclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].dclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
+
+ pi->uvd_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
+ (u8 *)&pi->uvd_level_count,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->uvd_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UVDInterval),
+ &pi->uvd_interval,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevel),
+ (u8 *)&pi->uvd_level,
+ sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_vce_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+ u32 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->vce_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
+ pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->vce_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].evclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->vce_level[i].Divider = (u8)dividers.post_div;
+
+ pi->vce_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
+ (u8 *)&pi->vce_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->vce_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VCEInterval),
+ (u8 *)&pi->vce_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevel),
+ (u8 *)&pi->vce_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_samu_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->samu_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->samu_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->samu_level[i].Divider = (u8)dividers.post_div;
+
+ pi->samu_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
+ (u8 *)&pi->samu_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->samu_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
+ (u8 *)&pi->samu_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevel),
+ (u8 *)&pi->samu_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
+ pi->sram_end);
+ return ret;
+}
+
+static int kv_populate_acp_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->acp_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->acp_level[i].Divider = (u8)dividers.post_div;
+
+ pi->acp_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
+ (u8 *)&pi->acp_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->acp_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, ACPInterval),
+ (u8 *)&pi->acp_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevel),
+ (u8 *)&pi->acp_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
+ pi->sram_end);
+	return ret;
+}
+
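+/*
+ * Choose a DFS bypass code for each graphics level whose sclk lies within
+ * 2 MHz of one of the known bypass frequencies (clocks in these tables are
+ * in 10 kHz units, so 40000 == 400 MHz).  The values 3/2/7/6/8 are
+ * presumably the hardware's divider encodings for those frequencies.
+ */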
+static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
+					pi->graphics_level[i].ClkBypassCntl = 6;
+				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ }
+}
+
+static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
+}
+
+static void kv_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void kv_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
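+/*
+ * Bring up dpm: upload the firmware tables, program the boot state, enable
+ * voltage scaling, thermal throttling and ULV, start dpm, and then
+ * power-gate the multimedia blocks until they are actually needed.
+ * Clockgating is held off around the whole sequence via cik_update_cg().
+ */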
+int kv_dpm_enable(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ret = kv_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("kv_process_firmware_header failed\n");
+ return ret;
+ }
+ kv_init_fps_limits(rdev);
+ kv_init_graphics_levels(rdev);
+ ret = kv_program_bootup_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_program_bootup_state failed\n");
+ return ret;
+ }
+ kv_calculate_dfs_bypass_settings(rdev);
+ ret = kv_upload_dpm_settings(rdev);
+ if (ret) {
+ DRM_ERROR("kv_upload_dpm_settings failed\n");
+ return ret;
+ }
+ ret = kv_populate_uvd_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_uvd_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_vce_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_vce_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_samu_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_samu_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_acp_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_acp_table failed\n");
+ return ret;
+ }
+ kv_program_vc(rdev);
+#if 0
+ kv_initialize_hardware_cac_manager(rdev);
+#endif
+ kv_start_am(rdev);
+ if (pi->enable_auto_thermal_throttling) {
+ ret = kv_enable_auto_thermal_throttling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ return ret;
+ }
+ }
+ ret = kv_enable_dpm_voltage_scaling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_interval(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_interval failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_boot_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ return ret;
+ }
+ ret = kv_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_ulv failed\n");
+ return ret;
+ }
+ kv_start_dpm(rdev);
+ ret = kv_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_didt failed\n");
+ return ret;
+ }
+ ret = kv_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_smc_cac failed\n");
+ return ret;
+ }
+
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+ }
+
+ /* powerdown unused blocks for now */
+ kv_dpm_powergate_acp(rdev, true);
+ kv_dpm_powergate_samu(rdev, true);
+ kv_dpm_powergate_vce(rdev, true);
+ kv_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return ret;
+}
+
+void kv_dpm_disable(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ /* powerup blocks */
+ kv_dpm_powergate_acp(rdev, false);
+ kv_dpm_powergate_samu(rdev, false);
+ kv_dpm_powergate_vce(rdev, false);
+ kv_dpm_powergate_uvd(rdev, false);
+
+ kv_enable_smc_cac(rdev, false);
+ kv_enable_didt(rdev, false);
+ kv_clear_vc(rdev);
+ kv_stop_dpm(rdev);
+ kv_enable_ulv(rdev, false);
+ kv_reset_am(rdev);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+#if 0
+static int kv_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
+ (u8 *)&value, sizeof(u16), pi->sram_end);
+}
+
+static int kv_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static void kv_init_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
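+/* If FPS-based throttling is supported, upload the high (45 fps) and low
+ * (30 fps) thresholds; the SMU expects them big-endian. */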
+static int kv_init_fps_limits(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ pi->fps_high_t = cpu_to_be16(tmp);
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsHighT),
+ (u8 *)&pi->fps_high_t,
+ sizeof(u16), pi->sram_end);
+
+ tmp = 30;
+ pi->fps_low_t = cpu_to_be16(tmp);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsLowT),
+ (u8 *)&pi->fps_low_t,
+ sizeof(u16), pi->sram_end);
+
+ }
+ return ret;
+}
+
+static void kv_init_powergate_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->uvd_power_gated = false;
+ pi->vce_power_gated = false;
+ pi->samu_power_gated = false;
+ pi->acp_power_gated = false;
+}
+
+static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
+}
+
+#if 0
+static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
+}
+#endif
+
+static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
+}
+
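+/* Select and upload the UVD boot level before toggling UVD dpm; when UVD
+ * dpm is unavailable or stable-p-state is forced, the SMU's enabled-level
+ * mask is also restricted to that single level. */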
+static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+ pi->uvd_boot_level = table->count - 1;
+ else
+ pi->uvd_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
+ (uint8_t *)&pi->uvd_boot_level,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (!pi->caps_uvd_dpm ||
+ pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (1 << pi->uvd_boot_level));
+ }
+
+ return kv_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= 0) /* XXX */
+ break;
+ }
+
+ return i;
+}
+
+static int kv_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ int ret;
+
+ if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+ if (pi->caps_stable_p_state)
+ pi->vce_boot_level = table->count - 1;
+ else
+ pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
+ (u8 *)&pi->vce_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (1 << pi->vce_boot_level));
+
+ kv_enable_vce_dpm(rdev, true);
+ } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
+ kv_enable_vce_dpm(rdev, false);
+ }
+
+ return 0;
+}
+#endif
+
+static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->samu_boot_level = table->count - 1;
+ else
+ pi->samu_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
+ (u8 *)&pi->samu_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (1 << pi->samu_boot_level));
+ }
+
+ return kv_enable_samu_dpm(rdev, !gate);
+}
+
+static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->acp_boot_level = table->count - 1;
+ else
+ pi->acp_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
+ (u8 *)&pi->acp_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+
+ return kv_enable_acp_dpm(rdev, !gate);
+}
+
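+/* Gating order matters here: stop the UVD block and drop its clockgating
+ * before asking the SMU to power it off, and do the reverse (power on,
+ * resume, restart) when un-gating. */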
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_uvd_pg) {
+ uvd_v1_0_stop(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ if (pi->caps_uvd_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
+ } else {
+ if (pi->caps_uvd_pg) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
+ uvd_v4_2_resume(rdev);
+ uvd_v1_0_start(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ }
+}
+
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->vce_power_gated == gate)
+ return;
+
+ pi->vce_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+ }
+}
+
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->samu_power_gated == gate)
+ return;
+
+ pi->samu_power_gated = gate;
+
+ if (gate) {
+ kv_update_samu_dpm(rdev, true);
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
+ } else {
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
+ kv_update_samu_dpm(rdev, false);
+ }
+}
+
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->acp_power_gated == gate)
+ return;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ pi->acp_power_gated = gate;
+
+ if (gate) {
+ kv_update_acp_dpm(rdev, true);
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
+ } else {
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
+ kv_update_acp_dpm(rdev, false);
+ }
+}
+
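+/*
+ * Clamp the usable dpm window to the new power state: lowest_valid becomes
+ * the first graphics level at or above the state's minimum sclk, and
+ * highest_valid the last level at or below its maximum.  If the window
+ * inverts, it is collapsed to whichever bound is closer.
+ */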
+static void kv_set_valid_clock_range(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
+ (i == (pi->graphics_dpm_level_count - 1))) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
+ (i == 0)) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
+ (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
+ if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
+ i == (int)(pi->graphics_dpm_level_count - 1)) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if (table->entries[i].sclk_frequency <=
+ new_ps->levels[new_ps->num_levels - 1].sclk ||
+ i == 0) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk -
+ table->entries[pi->highest_valid].sclk_frequency) >
+ (table->entries[pi->lowest_valid].sclk_frequency -
+ new_ps->levels[new_ps->num_levels -1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ }
+}
+
+static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+ u8 clk_bypass_cntl;
+
+ if (pi->caps_enable_dfs_bypass) {
+ clk_bypass_cntl = new_ps->need_dfs_bypass ?
+ pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
+ ret = kv_copy_bytes_to_smc(rdev,
+ (pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
+ (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
+ offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
+ &clk_bypass_cntl,
+ sizeof(u8), pi->sram_end);
+ }
+
+ return ret;
+}
+
+static int kv_enable_nb_dpm(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+
+ return ret;
+}
+
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ ret = kv_force_dpm_highest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = kv_force_dpm_lowest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ ret = kv_unforce_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ kv_update_requested_ps(rdev, new_ps);
+
+ kv_apply_state_adjust_rules(rdev,
+ &pi->requested_rps,
+ &pi->current_rps);
+
+ return 0;
+}
+
+int kv_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ /*struct radeon_ps *old_ps = &pi->current_rps;*/
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (rdev->family == CHIP_KABINI) {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_enable_new_levels(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_unforce_levels(rdev);
+ kv_set_enabled_levels(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ }
+ } else {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ kv_enable_nb_dpm(rdev);
+ }
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+ return 0;
+}
+
+void kv_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ kv_update_current_ps(rdev, new_ps);
+}
+
+void kv_dpm_setup_asic(struct radeon_device *rdev)
+{
+ sumo_take_smu_control(rdev, true);
+ kv_init_powergate_state(rdev);
+ kv_init_sclk_t(rdev);
+}
+
+void kv_dpm_reset_asic(struct radeon_device *rdev)
+{
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+}
+
+/* XXX use sumo_dpm_display_configuration_changed */
+
+static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
+ int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
+ table->sclk =
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
+ table->vddc =
+ kv_convert_2bit_index_to_voltage(rdev,
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
+ }
+
+ table->mclk = pi->sys_info.nbp_memory_clock[0];
+}
+
+static void kv_patch_voltage_values(struct radeon_device *rdev)
+{
+ int i;
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+
+ if (table->count) {
+ for (i = 0; i < table->count; i++)
+ table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ table->entries[i].v);
+ }
+
+}
+
+static void kv_construct_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+ pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+ pi->boot_pl.ds_divider_index = 0;
+ pi->boot_pl.ss_divider_index = 0;
+ pi->boot_pl.allow_gnb_slow = 1;
+ pi->boot_pl.force_nbp_state = 0;
+ pi->boot_pl.display_wm = 0;
+ pi->boot_pl.vce_wm = 0;
+}
+
+static int kv_force_dpm_highest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
+static int kv_force_dpm_lowest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
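+/* Pick the largest deep-sleep divider that still keeps the divided sclk
+ * above the minimum engine clock (or the caller's floor, if higher). */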
+static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ u32 temp;
+ u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ if (!pi->caps_sclk_ds)
+ return 0;
+
+	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
+ temp = sclk / sumo_get_sleep_divider_from_id(i);
+ if ((temp >= min) || (i == 0))
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ int i;
+
+ if (table && table->count) {
+ for (i = table->count - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ }
+
+ *limit = 0;
+ return 0;
+}
+
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
+{
+ struct kv_ps *ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 min_sclk = 10000; /* ??? */
+ u32 sclk, mclk = 0;
+ int i, limit;
+ bool force_high;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 stable_p_state_sclk = 0;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ mclk = max_limits->mclk;
+ sclk = min_sclk;
+
+ if (pi->caps_stable_p_state) {
+ stable_p_state_sclk = (max_limits->sclk * 75) / 100;
+
+		for (i = table->count - 1; i >= 0; i--) {
+ if (stable_p_state_sclk >= table->entries[i].clk) {
+ stable_p_state_sclk = table->entries[i].clk;
+ break;
+ }
+ }
+
+ if (i > 0)
+ stable_p_state_sclk = table->entries[0].clk;
+
+ sclk = stable_p_state_sclk;
+ }
+
+ ps->need_dfs_bypass = true;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (ps->levels[i].sclk < sclk)
+ ps->levels[i].sclk = sclk;
+ }
+
+ if (table && table->count) {
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].clk;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].sclk_frequency;
+ }
+ }
+ }
+
+ if (pi->caps_stable_p_state) {
+ for (i = 0; i < ps->num_levels; i++) {
+ ps->levels[i].sclk = stable_p_state_sclk;
+ }
+ }
+
+ pi->video_start = new_rps->dclk || new_rps->vclk;
+
+ if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->family == CHIP_KABINI) {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x1;
+ ps->dpmx_nb_ps_hi = 0x0;
+ } else {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x2;
+ ps->dpmx_nb_ps_hi = 0x1;
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->disable_nb_ps3_in_battery;
+ ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x2;
+ ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpmx_nb_ps_hi = 0x2;
+ }
+ }
+}
+
+static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
+}
+
+static int kv_calculate_ds_divider(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 sclk_in_sr = 10000; /* ??? */
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].DeepSleepDivId =
+ kv_get_sleep_divider_id_from_clock(rdev,
+ be32_to_cpu(pi->graphics_level[i].SclkFrequency),
+ sclk_in_sr);
+ }
+ return 0;
+}
+
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ bool force_high;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ u32 mclk = max_limits->mclk;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ if (rdev->family == CHIP_KABINI) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (!pi->sys_info.nb_dpm_enable)
+ return 0;
+
+ force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+
+ if (force_high) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].GnbSlow = 0;
+ } else {
+ if (pi->battery_state)
+ pi->graphics_level[0].ForceNbPs1 = 1;
+
+ pi->graphics_level[1].GnbSlow = 0;
+ pi->graphics_level[2].GnbSlow = 0;
+ pi->graphics_level[3].GnbSlow = 0;
+ pi->graphics_level[4].GnbSlow = 0;
+ }
+ } else {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ pi->graphics_level[pi->lowest_valid].UpH = 0x28;
+ pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
+ if (pi->lowest_valid != pi->highest_valid)
+ pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
+ }
+ }
+ return 0;
+}
+
+static int kv_calculate_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
+
+ return 0;
+}
+
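+/*
+ * Build the graphics dpm levels from the vddc/sclk dependency table (or,
+ * if the BIOS does not provide one, from the sumo-style sclk/voltage
+ * mapping table), then leave every level disabled; kv_enable_new_levels()
+ * re-enables the valid window later.
+ */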
+static void kv_init_graphics_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ u32 vid_2bit;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].clk);
+ vid_2bit = sumo_convert_vid7_to_vid2(rdev,
+ &pi->sys_info.vid_mapping_table,
+ table->entries[i].v);
+ kv_set_vid(rdev, i, vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->num_max_dpm_entries; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t <
+ kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
+ kv_set_vid(rdev, i, table->entries[i].vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ }
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ kv_dpm_power_level_enable(rdev, i, false);
+}
+
+static void kv_enable_new_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (i >= pi->lowest_valid && i <= pi->highest_valid)
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+}
+
+static int kv_set_enabled_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i, new_mask = 0;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ new_mask |= (1 << i);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 nbdpmconfig1;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ if (pi->sys_info.nb_dpm_enable) {
+ nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
+ nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
+ DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
+ nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
+ Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
+ DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
+ DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
+ WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
+ }
+}
+
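+/* Temperatures are in millidegrees C; the interrupt threshold fields take
+ * degrees biased by 49, which appears to be a fixed offset in the register
+ * format. */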
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
+ tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
+ tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
+ DIG_THERM_INTL(49 + (low_temp / 1000)));
+ WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
+
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static int kv_parse_sys_info_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+ int i;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (crev != 8) {
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ return -EINVAL;
+ }
+ pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
+ pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
+ pi->sys_info.bootup_nb_voltage_index =
+ le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
+ if (igp_info->info_8.ucHtcTmpLmt == 0)
+ pi->sys_info.htc_tmp_lmt = 203;
+ else
+ pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
+ if (igp_info->info_8.ucHtcHystLmt == 0)
+ pi->sys_info.htc_hyst_lmt = 5;
+ else
+ pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
+ if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+ DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ }
+
+ if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
+ pi->sys_info.nb_dpm_enable = true;
+ else
+ pi->sys_info.nb_dpm_enable = false;
+
+ for (i = 0; i < KV_NUM_NBPSTATES; i++) {
+ pi->sys_info.nbp_memory_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
+ pi->sys_info.nbp_n_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
+ }
+ if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
+ SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ pi->caps_enable_dfs_bypass = true;
+
+ sumo_construct_sclk_voltage_mapping_table(rdev,
+ &pi->sys_info.sclk_voltage_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ sumo_construct_vid_mapping_table(rdev,
+ &pi->sys_info.vid_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ kv_construct_max_power_limits_table(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ }
+ return 0;
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void kv_patch_boot_state(struct radeon_device *rdev,
+ struct kv_ps *ps)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ ps->num_levels = 1;
+ ps->levels[0] = pi->boot_pl;
+}
+
+static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.dpm.boot_ps = rps;
+ kv_patch_boot_state(rdev, ps);
+ }
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *ps = kv_get_ps(rps);
+ struct kv_pl *pl = &ps->levels[index];
+ u32 sclk;
+
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ pl->sclk = sclk;
+ pl->vddc_index = clock_info->sumo.vddcIndex;
+
+ ps->num_levels = index + 1;
+
+ if (pi->caps_sclk_ds) {
+ pl->ds_divider_index = 5;
+ pl->ss_divider_index = 5;
+ }
+}
+
+static int kv_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct kv_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ if (!rdev->pm.power_state[i].clock_info)
+ return -EINVAL;
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ kv_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int kv_dpm_init(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi;
+ int ret, i;
+
+ pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ pi->at[i] = TRINITY_AT_DFLT;
+
+ pi->sram_end = SMC_RAM_END;
+
+ if (rdev->family == CHIP_KABINI)
+ pi->high_voltage_t = 4001;
+
+ pi->enable_nb_dpm = true;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = true;
+ pi->enable_didt = false;
+ if (pi->enable_didt) {
+ pi->caps_sq_ramping = true;
+ pi->caps_db_ramping = true;
+ pi->caps_td_ramping = true;
+ pi->caps_tcp_ramping = true;
+ }
+
+ pi->caps_sclk_ds = true;
+ pi->enable_auto_thermal_throttling = true;
+ pi->disable_nb_ps3_in_battery = false;
+ pi->bapm_enable = true;
+ pi->voltage_drop_t = 0;
+ pi->caps_sclk_throttle_low_notification = false;
+ pi->caps_fps = false; /* true? */
+ pi->caps_uvd_pg = true;
+ pi->caps_uvd_dpm = true;
+ pi->caps_vce_pg = false;
+ pi->caps_samu_pg = false;
+ pi->caps_acp_pg = false;
+ pi->caps_stable_p_state = false;
+
+ ret = kv_parse_sys_info_table(rdev);
+ if (ret)
+ return ret;
+
+ kv_patch_voltage_values(rdev);
+ kv_construct_boot_state(rdev);
+
+ ret = kv_parse_power_table(rdev);
+ if (ret)
+ return ret;
+
+ pi->enable_dpm = true;
+
+ return 0;
+}
+
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 current_index =
+ (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+ CURR_SCLK_INDEX_SHIFT;
+ u32 sclk, tmp;
+ u16 vddc;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+ SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
+ vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+ seq_printf(m, "power level %d sclk: %u vddc: %u\n",
+ current_index, sclk, vddc);
+ }
+}
+
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ int i;
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->num_levels; i++) {
+ struct kv_pl *pl = &ps->levels[i];
+ printk("\t\tpower level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+void kv_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
+}
+
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+}
+
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->levels[0].sclk;
+ else
+ return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 00000000000..32bb079572d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __KV_DPM_H__
+#define __KV_DPM_H__
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
+#include "smu7_fusion.h"
+#include "trinity_dpm.h"
+#include "ppsmc.h"
+
+#define KV_NUM_NBPSTATES 4
+
+enum kv_pt_config_reg_type {
+ KV_CONFIGREG_MMR = 0,
+ KV_CONFIGREG_SMC_IND,
+ KV_CONFIGREG_DIDT_IND,
+ KV_CONFIGREG_CACHE,
+ KV_CONFIGREG_MAX
+};
+
+struct kv_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum kv_pt_config_reg_type type;
+};
+
+struct kv_lcac_config_values {
+ u32 block_id;
+ u32 signal_id;
+ u32 t;
+};
+
+struct kv_lcac_config_reg {
+ u32 cntl;
+ u32 block_mask;
+ u32 block_shift;
+ u32 signal_mask;
+ u32 signal_shift;
+ u32 t_mask;
+ u32 t_shift;
+ u32 enable_mask;
+ u32 enable_shift;
+};
+
+struct kv_pl {
+ u32 sclk;
+ u8 vddc_index;
+ u8 ds_divider_index;
+ u8 ss_divider_index;
+ u8 allow_gnb_slow;
+ u8 force_nbp_state;
+ u8 display_wm;
+ u8 vce_wm;
+};
+
+struct kv_ps {
+ struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 num_levels;
+ bool need_dfs_bypass;
+ u8 dpm0_pg_nb_ps_lo;
+ u8 dpm0_pg_nb_ps_hi;
+ u8 dpmx_nb_ps_lo;
+ u8 dpmx_nb_ps_hi;
+};
+
+struct kv_sys_info {
+ u32 bootup_uma_clk;
+ u32 bootup_sclk;
+ u32 dentist_vco_freq;
+ u32 nb_dpm_enable;
+ u32 nbp_memory_clock[KV_NUM_NBPSTATES];
+ u32 nbp_n_clock[KV_NUM_NBPSTATES];
+ u16 bootup_nb_voltage_index;
+ u8 htc_tmp_lmt;
+ u8 htc_hyst_lmt;
+ struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+ struct sumo_vid_mapping_table vid_mapping_table;
+ u32 uma_channel_number;
+};
+
+struct kv_power_info {
+ u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 voltage_drop_t;
+ struct kv_sys_info sys_info;
+ struct kv_pl boot_pl;
+ bool enable_nb_ps_policy;
+ bool disable_nb_ps3_in_battery;
+ bool video_start;
+ bool battery_state;
+ u32 lowest_valid;
+ u32 highest_valid;
+ u16 high_voltage_t;
+ bool cac_enabled;
+ bool bapm_enable;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ /* dpm SMU tables */
+ u8 graphics_dpm_level_count;
+ u8 uvd_level_count;
+ u8 vce_level_count;
+ u8 acp_level_count;
+ u8 samu_level_count;
+ u16 fps_high_t;
+ SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel acpi_level;
+ SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
+ u8 uvd_boot_level;
+ u8 vce_boot_level;
+ u8 acp_boot_level;
+ u8 samu_boot_level;
+ u8 uvd_interval;
+ u8 vce_interval;
+ u8 acp_interval;
+ u8 samu_interval;
+ u8 graphics_boot_level;
+ u8 graphics_interval;
+ u8 graphics_therm_throttle_enable;
+ u8 graphics_voltage_change_enable;
+ u8 graphics_clk_slow_enable;
+ u8 graphics_clk_slow_divider;
+ u8 fps_low_t;
+ u32 low_sclk_interrupt_t;
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool acp_power_gated;
+ bool samu_power_gated;
+ bool nb_dpm_enabled;
+ /* flags */
+ bool enable_didt;
+ bool enable_dpm;
+ bool enable_auto_thermal_throttling;
+ bool enable_nb_dpm;
+ /* caps */
+ bool caps_cac;
+ bool caps_power_containment;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_fps;
+ bool caps_uvd_dpm;
+ bool caps_uvd_pg;
+ bool caps_vce_pg;
+ bool caps_samu_pg;
+ bool caps_acp_pg;
+ bool caps_stable_p_state;
+ bool caps_enable_dfs_bypass;
+ bool caps_sclk_ds;
+ struct radeon_ps current_rps;
+ struct kv_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct kv_ps requested_ps;
+};
+
+
+/* kv_smc.c */
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter);
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit);
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 00000000000..34a226d7e34
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "kv_dpm.h"
+
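+/* Post a message id to the SMC mailbox and poll the response register;
+ * 1 indicates success, while 0xFF/0xFE look like the SMC's failure and
+ * unknown-command codes and are mapped to -EINVAL here. */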
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
+{
+ u32 i;
+ u32 tmp = 0;
+
+ WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
+
+ if (tmp != 1) {
+ if (tmp == 0xFF)
+ return -EINVAL;
+ else if (tmp == 0xFE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
+{
+ int ret;
+
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
+
+ if (ret == 0)
+ *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
+
+ return ret;
+}
+
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+
+ WREG32(SMC_MSG_ARG_0, parameter);
+
+ return kv_notify_message_to_smu(rdev, msg);
+}
+
+static int kv_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = kv_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
+}
+
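+/*
+ * Copy a byte stream into SMC SRAM.  The SMC address space is big-endian
+ * and only dword-addressable, so any unaligned head and tail bytes are
+ * handled with read-modify-write cycles while the aligned middle is
+ * written as packed big-endian dwords.
+ */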
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ int ret;
+ u32 data, original_data, addr, extra_shift, t_byte, count, mask;
+
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+ t_byte = addr & 3;
+
+ /* RMW for the initial bytes */
+ if (t_byte != 0) {
+ addr -= t_byte;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ data = 0;
+ mask = 0;
+ count = 4;
+ while (count > 0) {
+ if (t_byte > 0) {
+ mask = (mask << 8) | 0xff;
+ t_byte--;
+ } else if (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ mask <<= 8;
+ } else {
+ data <<= 8;
+ mask = (mask << 8) | 0xff;
+ }
+ count--;
+ }
+
+ data |= original_data & mask;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ addr += 4;
+ }
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+		original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ccb4f8b5485..93c1f9ef5da 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -35,7 +35,7 @@
#include "radeon_ucode.h"
#include "clearstate_cayman.h"
-static u32 tn_rlc_save_restore_register_list[] =
+static const u32 tn_rlc_save_restore_register_list[] =
{
0x98fc,
0x98f0,
@@ -160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] =
0x9830,
0x802c,
};
-static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -175,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -1374,23 +1378,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, 10); /* poll interval */
}
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-}
-
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1564,8 +1551,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
- rb_cntl = drm_order(ring->ring_size / 8);
- rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+ rb_cntl = order_base_2(ring->ring_size / 8);
+ rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
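
drm_order() and order_base_2() agree for the sizes used here: both return
the smallest order with (1 << order) >= n, so the substitution only swaps a
DRM helper for the generic include/linux/log2.h one without changing the
computed RB_CNTL value. A standalone stand-in with the assumed semantics:

    /* Userspace stand-in for order_base_2(): smallest order such that
     * (1UL << order) >= n; returns 0 for n <= 1. */
    static unsigned order_base_2(unsigned long n)
    {
        unsigned order = 0;

        while ((1UL << order) < n)
            order++;
        return order;
    }

    /* e.g. a 1 MiB ring: order_base_2(1048576 / 8) = 17 and
     * order_base_2(4096 / 8) = 9, so rb_cntl = 17 | (9 << 8). */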
@@ -1613,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- * Cayman and newer support two asynchronous DMA engines.
- */
-/**
- * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (cayman-SI).
- */
-void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * cayman_dma_stop - stop the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines (cayman-SI).
- */
-void cayman_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- /* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
-
- /* dma1 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
-}
-
-/**
- * cayman_dma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffers and enable them. (cayman-SI).
- * Returns 0 for success, error for failure.
- */
-int cayman_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = DMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = DMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + reg_offset, 0);
- WREG32(DMA_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL + reg_offset);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + reg_offset, dma_cntl);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
-
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cayman_dma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (cayman-SI).
- */
-void cayman_dma_fini(struct radeon_device *rdev)
-{
- cayman_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
-}
-
-static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -2045,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cayman_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2083,6 +1863,11 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_IGP) {
@@ -2109,26 +1894,16 @@ static int cayman_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
cayman_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
rdev->rlc.cs_data = cayman_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -2148,7 +1923,7 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -2199,7 +1974,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -2207,7 +1982,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2215,7 +1990,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2232,12 +2007,11 @@ static int cayman_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -2254,9 +2028,15 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r)
- return r;
+ if (ASIC_IS_DCE6(rdev)) {
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+ } else {
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+ }
return 0;
}
@@ -2287,11 +2067,14 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- r600_audio_fini(rdev);
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_fini(rdev);
+ else
+ r600_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2413,7 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2423,7 +2205,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2684,61 +2466,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
}
} else {
- if ((flags & RADEON_VM_PAGE_SYSTEM) ||
- (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
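
The replacement callee (added in ni_dma.c below) emits one DMA write packet
per chunk, capped at 0xFFFFE payload dwords with two dwords per PTE. A
quick sketch of the packet count that cap implies; the helper is
hypothetical, not part of the driver:

    /* Packets needed to write 'count' PTEs through DMA_PACKET_WRITE,
     * given the 0xFFFFE-dword payload limit (2 dwords per PTE). */
    static unsigned num_write_packets(unsigned count)
    {
        unsigned packets = 0;

        while (count) {
            unsigned ndw = count * 2;

            if (ndw > 0xFFFFE)
                ndw = 0xFFFFE;
            count -= ndw / 2;
            packets++;
        }
        return packets;
    }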
@@ -2772,26 +2500,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
-
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 00000000000..dd6e9688fbe
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
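
The padding loop in cayman_dma_ring_ib_execute() works because the
INDIRECT_BUFFER packet is three dwords long: parking the write pointer at
(wptr & 7) == 5 makes the packet's last dword land exactly on the required
8-dword boundary (5 + 3 == 8). A sketch of the NOP count that rule
produces; the helper name is illustrative:

    /* NOPs required before an INDIRECT_BUFFER packet so that its three
     * dwords end on an 8-dword boundary. */
    static unsigned ib_pad_nops(unsigned wptr)
    {
        unsigned nops = 0;

        while ((wptr & 7) != 5) {
            wptr++;
            nops++;
        }
        return nops;
    }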
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f0f5f748938..f7b625c9e0e 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
@@ -4037,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -4046,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev)
ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
ni_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -4270,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev)
ni_pi->use_power_boost_limit = true;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
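
The vblank check above compares a measured blanking interval against 450 us
for GDDR5, since mclk switches have to complete while the display is
blanked. A sketch of the timing math r600_dpm_get_vblank_time() presumably
performs, derived from standard CRTC timings rather than from this diff:

    /* Vblank duration in microseconds: blanked lines times dots per
     * line, divided by the dot clock in kHz. */
    static u32 vblank_time_us(u32 vtotal, u32 vdisplay,
                              u32 htotal, u32 pixel_clock_khz)
    {
        u64 dots = (u64)(vtotal - vdisplay) * htotal;

        return (u32)div_u64(dots * 1000, pixel_clock_khz);
    }

    /* 1080p@60 (148.5 MHz, vtotal 1125, htotal 2200): 45 blanked lines
     * * 2200 * 1000 / 148500 ~= 666 us, comfortably above 450 us. */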
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index b5564a3645d..682842804bc 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,11 +99,68 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+/* CI/KV/KB */
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+
/* TN */
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
+#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
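
These ids feed the SMU mailbox helpers seen in kv_smc.c above. A sketch of
how a driver-side toggle would pair the new Enable/Disable ids, modeled on
kv_smc_dpm_enable(); the function itself is illustrative:

    /* Gate UVD DPM via the new CI/KV message ids, reusing the
     * kv_notify_message_to_smu() helper from kv_smc.c. */
    static int kv_smc_uvd_dpm_enable(struct radeon_device *rdev, bool enable)
    {
        return kv_notify_message_to_smu(rdev, enable ?
                                        PPSMC_MSG_UVDDPM_Enable :
                                        PPSMC_MSG_UVDDPM_Disable);
    }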
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 00000000000..da43ab32883
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(push, 1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_KONG 10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
+#define ATOM_PP_THERMALCONTROLLER_LM96163 17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI 19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+ ATOM_PPLIB_FANTABLE basicTable;
+ USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+ USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
+ USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
+ USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
+ USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table
+ USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+ USHORT usMvddDependencyOnMCLKOffset;
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
+ USHORT usTDPODLimit;
+ USHORT usLoadLineSlope; // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
+
+//M3 Arb //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ UCHAR ucPCIEGen;
+ UCHAR ucUnused1;
+
+ ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ UCHAR ucPCIEGen;
+ USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
+ UCHAR ucEngineClockHigh; //clockfrequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ USHORT tdpLimit;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+ //how many non-clock levels we have. normally should be same as number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+ struct
+ {
+ USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd.
+ ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd.
+
+ };
+ struct
+ {
+ USHORT usVddc1;
+ USHORT usVddc2;
+ USHORT usVddc3;
+ };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+ USHORT usVoltage;
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+ USHORT usEVClkLow;
+ UCHAR ucEVClkHigh;
+ USHORT usECClkLow;
+ UCHAR ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+ UCHAR ucVCEClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+ UCHAR revid;
+// VCEClockInfoArray array;
+// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+ USHORT usVClkLow;
+ UCHAR ucVClkHigh;
+ USHORT usDClkLow;
+ UCHAR ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+ UCHAR revid;
+// UVDClockInfoArray array;
+// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usSAMClockLow;
+ UCHAR ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usACPClockLow;
+ UCHAR ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+ USHORT usTDP;
+ USHORT usConfigurableTDP;
+ USHORT usTDC;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usLowCACLeakage;
+ USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+ USHORT usMaximumPowerDeliveryLimit;
+ USHORT usReserve[7];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A 1
+#define ATOM_PPM_A_I 2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+ UCHAR ucRevId;
+ UCHAR ucPpmDesign; //A+I or A+A
+ USHORT usCpuCoreNumber;
+ ULONG ulPlatformTDP;
+ ULONG ulSmallACPlatformTDP;
+ ULONG ulPlatformTDC;
+ ULONG ulSmallACPlatformTDC;
+ ULONG ulApuTDP;
+ ULONG ulDGpuTDP;
+ ULONG ulDGpuUlvPower;
+ ULONG ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack(pop)
+
+#endif
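
Most arrays in this header are variable-sized, with a one-element tail
(entries[1]) and a per-entry size only known at runtime (ucEntrySize).
That layout is exactly why ni_parse_power_table() above switched to
byte-stride indexing through a u8 pointer. A hedged sketch of the access
pattern; the helper name is illustrative:

    /* Index into a ClockInfoArray whose entry size is a runtime value:
     * the stride must be applied in bytes, not via array indexing. */
    static const void *clock_info_entry(const ClockInfoArray *array,
                                        unsigned int i)
    {
        return (const u8 *)array->clockInfo +
               (size_t)i * array->ucEntrySize;
    }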
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84..9fc61dd68bc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,12 +1097,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
- 0, 0x7fffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r) {
return r;
}
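
The alignment step rounds the requested ring size up to the next power of
two the CP can address. Worked through, assuming order_base_2() rounds up
as sketched earlier:

    /* Ring size alignment as done in r100_cp_init():
     *  96 KiB: order_base_2(98304 / 8) = 14 -> (1 << 15) * 4 = 128 KiB
     *   1 MiB: order_base_2(1048576 / 8) = 17 -> (1 << 18) * 4 = 1 MiB */
    static unsigned align_ring_size(unsigned ring_size)
    {
        unsigned rb_bufsz = order_base_2(ring_size / 8);

        return (1 << (rb_bufsz + 1)) * 4;
    }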
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e66e7207735..ea4d3734e6d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * r600_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = r600_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -2417,8 +2395,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2471,7 +2449,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
@@ -2494,345 +2472,6 @@ void r600_cp_fini(struct radeon_device *rdev)
}
/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- */
-/**
- * r600_dma_stop - stop the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine (r6xx-evergreen).
- */
-void r600_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl = RREG32(DMA_RB_CNTL);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
-}
-
-/**
- * r600_dma_resume - setup and start the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- int r;
-
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR, 0);
- WREG32(DMA_RB_WPTR, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI,
- upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO,
- ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL, dma_cntl);
-
- if (rdev->family >= CHIP_RV770)
- WREG32(DMA_MODE, 1);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
- WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * r600_dma_fini - tear down the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine and free the ring (r6xx-evergreen).
- */
-void r600_dma_fini(struct radeon_device *rdev)
-{
- r600_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
-}
-
-/*
- * UVD
- */
-int r600_uvd_rbc_start(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- uint64_t rptr_addr;
- uint32_t rb_bufsz, tmp;
- int r;
-
- rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
-
- if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
- DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
- return -EINVAL;
- }
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-
- /* Set the write pointer delay */
- WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
-
- /* set the wb address */
- WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
-
- /* programm the 4GB memory segment for rptr and ring buffer */
- WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
- (0x7 << 16) | (0x1 << 31));
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(UVD_RBC_RB_RPTR, 0x0);
-
- ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
- WREG32(UVD_RBC_RB_WPTR, ring->wptr);
-
- /* set the ring address */
- WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
-
- /* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size);
- rb_bufsz = (0x1 << 8) | rb_bufsz;
- WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
-
- ring->ready = true;
- r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- r = radeon_ring_lock(rdev, ring, 10);
- if (r) {
- DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
- return r;
- }
-
- tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- /* Clear timeout status bits */
- radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
- radeon_ring_write(ring, 0x8);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
- radeon_ring_write(ring, 3);
-
- radeon_ring_unlock_commit(rdev, ring);
-
- return 0;
-}
-
-void r600_uvd_stop(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-
- /* Stall UMC and register bus before resetting VCPU */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
- mdelay(1);
-
- /* put VCPU into reset */
- WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
- mdelay(5);
-
- /* disable VCPU clock */
- WREG32(UVD_VCPU_CNTL, 0x0);
-
- /* Unstall UMC and register bus */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
- ring->ready = false;
-}
-
-int r600_uvd_init(struct radeon_device *rdev)
-{
- int i, j, r;
- /* disable byte swapping */
- u32 lmi_swap_cntl = 0;
- u32 mp_swap_cntl = 0;
-
- /* raise clocks while booting up the VCPU */
- radeon_set_uvd_clocks(rdev, 53300, 40000);
-
- /* disable clock gating */
- WREG32(UVD_CGC_GATE, 0);
-
- /* disable interupt */
- WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
-
- /* Stall UMC and register bus before resetting VCPU */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
- mdelay(1);
-
- /* put LMI, VCPU, RBC etc... into reset */
- WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
- LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
- CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
- mdelay(5);
-
- /* take UVD block out of reset */
- WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
- mdelay(5);
-
- /* initialize UVD memory controller */
- WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
-
-#ifdef __BIG_ENDIAN
- /* swap (8 in 32) RB and IB */
- lmi_swap_cntl = 0xa;
- mp_swap_cntl = 0;
-#endif
- WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
- WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
-
- WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXA1, 0x0);
- WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXB1, 0x0);
- WREG32(UVD_MPC_SET_ALU, 0);
- WREG32(UVD_MPC_SET_MUX, 0x88);
-
- /* take all subblocks out of reset, except VCPU */
- WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
- mdelay(5);
-
- /* enable VCPU clock */
- WREG32(UVD_VCPU_CNTL, 1 << 9);
-
- /* enable UMC */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
-
- /* boot up the VCPU */
- WREG32(UVD_SOFT_RESET, 0);
- mdelay(10);
-
- WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(UVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
- mdelay(10);
- WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
- mdelay(10);
- r = -1;
- }
-
- if (r) {
- DRM_ERROR("UVD not responding, giving up!!!\n");
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
- }
-
- /* enable interupt */
- WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
-
- r = r600_uvd_rbc_start(rdev);
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
-
- /* lower clocks again */
- radeon_set_uvd_clocks(rdev, 0, 0);
-
- return r;
-}
-
-/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2887,94 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-/**
- * r600_dma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by writing using it to write an
- * value to memory. (r6xx-SI).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- uint32_t tmp = 0;
- unsigned i;
- int r;
-
- WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, ring, 3);
- if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(UVD_CONTEXT_ID);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
/*
* CP fences/semaphores
*/
@@ -3026,30 +2577,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 2);
- return;
-}
-
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -3066,95 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
-/*
- * DMA fences/semaphores
- */
-
-/**
- * r600_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (r6xx-r7xx).
- */
-void r600_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, lower_32_bits(fence->seq));
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
-}
-
-/**
- * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (r6xx-SI).
- */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 s = emit_wait ? 0 : 1;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-}
-
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, emit_wait ? 1 : 0);
-}
-
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- struct radeon_sa_bo *vb = NULL;
- int r;
-
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
- if (r) {
- return r;
- }
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb, sem);
- return 0;
-}
-
/**
* r600_copy_cpdma - copy pages using the CP DMA engine
*
@@ -3239,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
-/**
- * r600_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r6xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
- r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFE)
- cur_size_in_dw = 0xFFFE;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
- (upper_32_bits(src_offset) & 0xff)));
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
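
r600_copy_dma() is removed here; it presumably carries over to the new r600_dma.c added later in this patch (the visible part of that file ends before it). Its chunking rule of at most 0xFFFE dwords per DMA_PACKET_COPY is what sizes the ring reservation of num_loops * 4 + 8 dwords. A standalone sketch of the arithmetic:

/* Illustrative only, not part of the patch. */
#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT 12    /* 4 KiB GPU pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned num_gpu_pages = 1024;  /* a 4 MiB transfer */
    unsigned size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
    unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);

    /* 1048576 dwords split into 17 copy packets of <= 0xFFFE dwords */
    printf("%u dwords, %u packets, %u ring dwords reserved\n",
           size_in_dw, num_loops, num_loops * 4 + 8);
    return 0;
}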
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -3334,6 +2698,11 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
r600_mc_program(rdev);
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -3344,10 +2713,6 @@ static int r600_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -3356,12 +2721,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -3398,14 +2757,14 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -3574,7 +2933,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r600_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -3626,16 +2984,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
- radeon_ring_write(ring, ib->gpu_addr);
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
- radeon_ring_write(ring, ib->length_dw);
-}
-
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
@@ -3689,139 +3037,6 @@ free_scratch:
return r;
}
-/**
- * r600_dma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (r6xx-SI).
- * Returns 0 on success, error on failure.
- */
-int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
- ib.ptr[3] = 0xDEADBEEF;
- ib.length_dw = 4;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_fence *fence = NULL;
- int r;
-
- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
- if (r) {
- DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
- if (r) {
- DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
- if (r) {
- DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = radeon_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- radeon_fence_unref(&fence);
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
-}
-
-/**
- * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (r6xx-r7xx).
- */
-void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
/*
* Interrupts
*
@@ -3838,7 +3053,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 4);
+ rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
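
Forcing the IH ring to a power-of-two size lets rdev->ih.ptr_mask turn read-pointer wraparound into a single AND, avoiding a modulo on the interrupt path. A tiny illustration:

/* Illustrative only, not part of the patch. */
#include <assert.h>

int main(void)
{
    unsigned ring_size = 1 << 16;   /* bytes, power of two */
    unsigned ptr_mask = ring_size - 1;
    unsigned rptr = ring_size - 4;  /* last dword slot */

    rptr = (rptr + 16) & ptr_mask;  /* advance past the end */
    assert(rptr == 12);             /* wrapped to the start */
    return 0;
}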
@@ -4075,7 +3290,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index c92eb86a8e5..47fc2b88697 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
}
-struct r600_audio r600_audio_status(struct radeon_device *rdev)
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio status;
+ struct r600_audio_pin status;
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
@@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work)
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
- struct r600_audio audio_status = r600_audio_status(rdev);
+ struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
- if (rdev->audio_status.channels != audio_status.channels ||
- rdev->audio_status.rate != audio_status.rate ||
- rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
- rdev->audio_status.status_bits != audio_status.status_bits ||
- rdev->audio_status.category_code != audio_status.category_code) {
- rdev->audio_status = audio_status;
+ if (rdev->audio.pin[0].channels != audio_status.channels ||
+ rdev->audio.pin[0].rate != audio_status.rate ||
+ rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio.pin[0].status_bits != audio_status.status_bits ||
+ rdev->audio.pin[0].category_code != audio_status.category_code) {
+ rdev->audio.pin[0] = audio_status;
changed = true;
}
@@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
}
-/*
- * turn on/off audio engine
- */
-static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+/* enable the audio stream */
+static void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
- DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- rdev->audio_enabled = enable;
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev)
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- r600_audio_engine_enable(rdev, true);
+ rdev->audio.enabled = true;
+
+ rdev->audio.num_pins = 1;
+ rdev->audio.pin[0].channels = -1;
+ rdev->audio.pin[0].rate = -1;
+ rdev->audio.pin[0].bits_per_sample = -1;
+ rdev->audio.pin[0].status_bits = 0;
+ rdev->audio.pin[0].category_code = 0;
+ rdev->audio.pin[0].id = 0;
- rdev->audio_status.channels = -1;
- rdev->audio_status.rate = -1;
- rdev->audio_status.bits_per_sample = -1;
- rdev->audio_status.status_bits = 0;
- rdev->audio_status.category_code = 0;
+ r600_audio_enable(rdev, &rdev->audio.pin[0], true);
return 0;
}
@@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!rdev->audio_enabled)
+ if (!rdev->audio.enabled)
return;
- r600_audio_engine_enable(rdev, false);
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
+
+ rdev->audio.enabled = false;
+}
+
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
+{
+ /* only one pin on 6xx-NI */
+ return &rdev->audio.pin[0];
}
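
The enable path above leans on WREG32_P(), radeon's masked read-modify-write register helper: bits set in the mask argument keep their current value and the remaining bits are taken from val. A userspace model of those semantics, with fake_reg standing in for the MMIO register and the macro's exact definition taken as an assumption:

/* Illustrative only, not part of the patch. */
#include <assert.h>
#include <stdint.h>

static uint32_t fake_reg;           /* stand-in for the MMIO register */

static void wreg32_p(uint32_t val, uint32_t mask)
{
    fake_reg = (fake_reg & mask) | (val & ~mask);
}

int main(void)
{
    fake_reg = 0x00ff00ff;
    /* enable audio: force the 0x81000000 bits, keep everything else */
    wreg32_p(0x81000000, ~0x81000000u);
    assert(fake_reg == 0x81ff00ff);
    return 0;
}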
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f651881eb0a..daf7572be97 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts an unsigned integer into its 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+static __pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
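
int2float() above, moved over from the deleted r600_blit_kms.c and made static, builds the raw IEEE-754 single-precision bit pattern for an integer. The claim in its comment (exact from 0 to 2^24) can be checked mechanically; below is a self-contained userspace port with a test, where ror32() and the kernel's __fls() are re-implemented with builtins as an assumption of the sketch:

/* Illustrative only, not part of the patch. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define I2F_FRAC_BITS 23
#define I2F_MASK ((1u << I2F_FRAC_BITS) - 1)

static uint32_t ror32(uint32_t x, unsigned n)   /* rotate right, n mod 32 */
{
    return (x >> (n & 31)) | (x << ((32 - n) & 31));
}

static uint32_t int2float_port(uint32_t x)
{
    uint32_t msb, exponent, fraction;

    if (!x)
        return 0;
    msb = 31 - __builtin_clz(x);    /* like the kernel's __fls() */
    fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
    exponent = (127 + msb) << I2F_FRAC_BITS;
    return fraction + exponent;
}

int main(void)
{
    uint32_t x, bits;
    float f;

    for (x = 0; x < (1u << 24); x++) {
        f = (float)x;               /* exact below 2^24 */
        memcpy(&bits, &f, sizeof(bits));
        assert(bits == int2float_port(x));
    }
    return 0;
}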
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
deleted file mode 100644
index 9fb5780a552..00000000000
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "r600d.h"
-#include "r600_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
-/* emits 21 on rv770+, 23 on r600 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(ring, 2 << 0);
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (pitch << 0) | (slice << 10));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, cb_color_info);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 21dw + 1 surface sync = 26dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
- u32 sq_pgm_resources;
-
- /* setup shader regs */
- sq_pgm_resources = (1 << 0);
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 2);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 9 + 1 sync (5) = 14*/
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2;
-
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0x460);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1);
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, 1 << 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-}
-
-/* emits 9 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
- S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
- S_038000_TEX_WIDTH(w - 1);
-
- sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
- sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
-
- sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
- S_038010_DST_SEL_X(SQ_SEL_X) |
- S_038010_DST_SEL_Y(SQ_SEL_Y) |
- S_038010_DST_SEL_Z(SQ_SEL_Z) |
- S_038010_DST_SEL_W(SQ_SEL_W);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 14 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
- u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- switch (rdev->family) {
- case CHIP_R600:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV630:
- case CHIP_RV635:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 40;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV610:
- case CHIP_RV620:
- case CHIP_RS780:
- case CHIP_RS880:
- default:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV670:
- num_ps_gprs = 144;
- num_vs_gprs = 40;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV770:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 256;
- num_vs_stack_entries = 256;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV730:
- case CHIP_RV740:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV710:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 48;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- }
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (DX9_CONSTS |
- ALU_INST_PREFER_VECTOR |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
-}
-
-int r600_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 76;
- /* set_render_target emits 2 extra dwords on rv6xx */
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
- rdev->r600_blit.ring_size_per_loop += 2;
-
- rdev->r600_blit.max_dim = 8192;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family >= CHIP_RV770)
- rdev->r600_blit.state_len = r7xx_default_size;
- else
- rdev->r600_blit.state_len = r6xx_default_size;
-
- dwords = rdev->r600_blit.state_len;
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- obj_size += r6xx_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- obj_size += r6xx_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (rdev->r600_blit.shader_obj == NULL) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("r600 failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
- if (rdev->family >= CHIP_RV770)
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r7xx_default_state, rdev->r600_blit.state_len * 4);
- else
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r6xx_default_state, rdev->r600_blit.state_len * 4);
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < r6xx_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
- for (i = 0; i < r6xx_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
-
-void r600_blit_fini(struct radeon_device *rdev)
-{
- int r;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- if (rdev->r600_blit.shader_obj == NULL)
- return;
- /* If we can't reserve the bo, unref should be enough to destroy
- * it when it becomes idle.
- */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- radeon_bo_unref(&rdev->r600_blit.shader_obj);
-}
-
-static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
- int *width, int *height, int max_dim)
-{
- unsigned max_pages;
- unsigned pages = num_gpu_pages;
- int w, h;
-
- if (num_gpu_pages == 0) {
- /* not supposed to be called with no pages, but just in case */
- h = 0;
- w = 0;
- pages = 0;
- WARN_ON(1);
- } else {
- int rect_order = 2;
- h = RECT_UNIT_H;
- while (num_gpu_pages / rect_order) {
- h *= 2;
- rect_order *= 4;
- if (h >= max_dim) {
- h = max_dim;
- break;
- }
- }
- max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
- if (pages > max_pages)
- pages = max_pages;
- w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
- w = (w / RECT_UNIT_W) * RECT_UNIT_W;
- pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
- BUG_ON(pages == 0);
- }
-
-
- DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
-
- /* return width and height only of the caller wants it */
- if (height)
- *height = h;
- if (width)
- *width = w;
-
- return pages;
-}
-
-
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
- int ring_size;
- int num_loops = 0;
- int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
-
- /* num loops */
- while (num_gpu_pages) {
- num_gpu_pages -=
- r600_blit_create_rect(num_gpu_pages, NULL, NULL,
- rdev->r600_blit.max_dim);
- num_loops++;
- }
-
- /* 48 bytes for vertex per loop */
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
- (num_loops*48)+256, 256, true);
- if (r) {
- return r;
- }
-
- r = radeon_semaphore_create(rdev, sem);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- return r;
- }
-
- /* calculate number of loops correctly */
- ring_size = num_loops * dwords_per_loop;
- ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring, ring_size);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- radeon_semaphore_free(rdev, sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
- radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
- RADEON_RING_TYPE_GFX_INDEX);
- radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
- } else {
- radeon_semaphore_free(rdev, sem, NULL);
- }
-
- rdev->r600_blit.primitives.set_default_state(rdev);
- rdev->r600_blit.primitives.set_shaders(rdev);
- return 0;
-}
-
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
-
- r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, *fence);
- radeon_semaphore_free(rdev, &sem, *fence);
-}
-
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb)
-{
- u64 vb_gpu_addr;
- u32 *vb_cpu_addr;
-
- DRM_DEBUG("emitting copy %16llx %16llx %d\n",
- src_gpu_addr, dst_gpu_addr, num_gpu_pages);
- vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
- vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
-
- while (num_gpu_pages) {
- int w, h;
- unsigned size_in_bytes;
- unsigned pages_per_loop =
- r600_blit_create_rect(num_gpu_pages, &w, &h,
- rdev->r600_blit.max_dim);
-
- size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
- DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
-
- vb_cpu_addr[0] = 0;
- vb_cpu_addr[1] = 0;
- vb_cpu_addr[2] = 0;
- vb_cpu_addr[3] = 0;
-
- vb_cpu_addr[4] = 0;
- vb_cpu_addr[5] = int2float(h);
- vb_cpu_addr[6] = 0;
- vb_cpu_addr[7] = int2float(h);
-
- vb_cpu_addr[8] = int2float(w);
- vb_cpu_addr[9] = int2float(h);
- vb_cpu_addr[10] = int2float(w);
- vb_cpu_addr[11] = int2float(h);
-
- rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
- w, h, w, src_gpu_addr, size_in_bytes);
- rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
- w, h, dst_gpu_addr);
- rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
- rdev->r600_blit.primitives.draw_auto(rdev);
- rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- size_in_bytes, dst_gpu_addr);
-
- vb_cpu_addr += 12;
- vb_gpu_addr += 4*12;
- src_gpu_addr += size_in_bytes;
- dst_gpu_addr += size_in_bytes;
- num_gpu_pages -= pages_per_loop;
- }
-}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index 2f3ce7a7597..f437d36dd98 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[];
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
-__pure uint32_t int2float(uint32_t x);
#endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fd..d8eb48bff0e 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 00000000000..3b317456512
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+
+/**
+ * r600_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (r6xx+).
+ */
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+}
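
(Illustration, not part of the patch: the three helpers above convert
between the driver's dword index and the hardware's byte offset, which
lives in bits 17:2 of the RPTR/WPTR registers.)

    /* dword index 100 -> register value (100 << 2) & 0x3fffc == 400
     * register value 400 -> dword index (400 & 0x3fffc) >> 2 == 100 */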
+
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
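
(Illustration, not part of the patch: the ring-size field arithmetic in
r600_dma_resume() above, worked for a 64 KiB ring.)

    /* 65536 bytes / 4 = 16384 dwords; order_base_2(16384) = 14;
     * rb_cntl size field = 14 << 1 = 0x1c, before the writeback,
     * swap and DMA_RB_ENABLE bits are OR'd in. */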
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+	/* force ring activity */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, followed by a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
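
(Illustration, not part of the patch: the padding rule in
r600_dma_ring_ib_execute() above. The INDIRECT_BUFFER packet is 3 dwords,
so NOPs are emitted until wptr & 7 == 5; the packet then spans dwords
5..7 (mod 8) and ends exactly on an 8-dword boundary. The next_rptr write
packet is 4 dwords, which is why next_rptr starts at wptr + 4 before
being aligned the same way and bumped past the 3-dword IB packet.)

    /* e.g. wptr == 13: 13 & 7 == 5, so the IB packet occupies
     * dwords 13, 14, 15 and the next packet starts at 16 (16 % 8 == 0). */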
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
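
(Illustration, not part of the patch: the chunking arithmetic in
r600_copy_dma() above. Each DMA_PACKET_COPY moves at most 0xFFFE dwords;
worked for 1024 pages, assuming the usual 4 KiB GPU page:)

    /* (1024 << 12) / 4 = 1048576 dwords
     * DIV_ROUND_UP(1048576, 0xFFFE) = 17 copy packets of 4 dwords each
     * ring space reserved: 17 * 4 + 8 = 76 dwords */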
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e5c860f4ccb..fa0de46fcc0 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
return vblank_time_us;
}
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 vrefresh = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ vrefresh = radeon_crtc->hw_mode.vrefresh;
+ break;
+ }
+ }
+
+ return vrefresh;
+}
+
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
@@ -745,6 +763,8 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
return true;
case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
@@ -779,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
u32 size = atom_table->ucNumEntries *
sizeof(struct radeon_clock_voltage_dependency_entry);
int i;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
radeon_table->entries = kzalloc(size, GFP_KERNEL);
if (!radeon_table->entries)
return -ENOMEM;
+ entry = &atom_table->entries[0];
for (i = 0; i < atom_table->ucNumEntries; i++) {
- radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
- (atom_table->entries[i].ucClockHigh << 16);
- radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
+ radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+ (entry->ucClockHigh << 16);
+ radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
}
radeon_table->count = atom_table->ucNumEntries;
@@ -875,6 +899,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
return ret;
}
}
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ return ret;
+ }
+ }
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
@@ -898,27 +935,27 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_PhaseSheddingLimits_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kzalloc(psl->ucNumEntries *
sizeof(struct radeon_phase_shedding_limits_entry),
GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(psl->entries[i].usSclkLow) |
- (psl->entries[i].ucSclkHigh << 16);
+ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(psl->entries[i].usMclkLow) |
- (psl->entries[i].ucMclkHigh << 16);
+ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(psl->entries[i].usVoltage);
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
}
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
psl->ucNumEntries;
@@ -945,30 +982,140 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_CAC_Leakage_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(cac_table->entries[i].usVddc);
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(cac_table->entries[i].ulLeakageValue);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+ le16_to_cpu(entry->usVddc1);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+ le16_to_cpu(entry->usVddc2);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+ le16_to_cpu(entry->usVddc3);
+ } else {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+ le16_to_cpu(entry->usVddc);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+ le32_to_cpu(entry->ulLeakageValue);
+ }
+ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
}
rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
}
}
- /* ppm table */
+ /* ext tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+ ext_hdr->usVCETableOffset) {
+ VCEClockInfoArray *array = (VCEClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_vce_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ VCEClockInfo *vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+ ext_hdr->usUVDTableOffset) {
+ UVDClockInfoArray *array = (UVDClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ UVDClockInfo *uvd_clk = (UVDClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+ ext_hdr->usSAMUTableOffset) {
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+ }
+ }
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
ext_hdr->usPPMTableOffset) {
ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
@@ -977,10 +1124,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.ppm_table) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
@@ -1003,6 +1147,71 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table->tj_max =
le32_to_cpu(ppm->ulTjmax);
}
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+ ext_hdr->usACPTableOffset) {
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+ ext_hdr->usPowerTuneTableOffset) {
+ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ ATOM_PowerTune_Table *pt;
+ rdev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+ ppt->usMaximumPowerDeliveryLimit;
+ pt = &ppt->power_tune_table;
+ } else {
+ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+ pt = &ppt->power_tune_table;
+ }
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+ le16_to_cpu(pt->usConfigurableTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+ le16_to_cpu(pt->usBatteryPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+ le16_to_cpu(pt->usSmallPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+ le16_to_cpu(pt->usLowCACLeakage);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+ le16_to_cpu(pt->usHighCACLeakage);
+ }
}
return 0;
@@ -1016,12 +1225,24 @@ void r600_free_extended_power_table(struct radeon_device *rdev)
kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
+ kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
if (rdev->pm.dpm.dyn_state.ppm_table)
kfree(rdev->pm.dpm.dyn_state.ppm_table);
+ if (rdev->pm.dpm.dyn_state.cac_tdp_table)
+ kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
}
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
@@ -1046,3 +1267,36 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
}
return RADEON_PCIE_GEN1;
}
+
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes)
+{
+ switch (asic_lanes) {
+ case 0:
+ default:
+ return default_lanes;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 4:
+ return 4;
+ case 8:
+ return 8;
+ case 12:
+ return 12;
+ case 16:
+ return 16;
+ }
+}
+
+u8 r600_encode_pci_lane_width(u32 lanes)
+{
+ u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+
+ if (lanes > 16)
+ return 0;
+
+ return encoded_lanes[lanes];
+}
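
(Illustration, not part of the patch: the lookup table in
r600_encode_pci_lane_width() maps a physical lane count to the register
encoding; only 1/2/4/8/12/16 are valid, anything else lands on a zero
slot.)

    /* r600_encode_pci_lane_width(1)  == 1
     * r600_encode_pci_lane_width(4)  == 3
     * r600_encode_pci_lane_width(12) == 5
     * r600_encode_pci_lane_width(16) == 6
     * r600_encode_pci_lane_width(3)  == 0  (invalid count) */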
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 7c822d9ae53..1000bf9719f 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps);
void r600_dpm_print_ps_status(struct radeon_device *rdev,
struct radeon_ps *rps);
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
bool r600_is_uvd_state(u32 class, u32 class2);
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u);
@@ -224,4 +225,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
enum radeon_pcie_gen asic_gen,
enum radeon_pcie_gen default_gen);
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes);
+u8 r600_encode_pci_lane_width(u32 lanes);
+
#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f264df5470f..f443010ce90 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -283,6 +283,107 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
}
}
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
/*
* update the info frames with the data from the current display mode
*/
@@ -327,6 +428,11 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
+ if (ASIC_IS_DCE32(rdev)) {
+ dce3_2_afmt_write_speaker_allocation(encoder);
+ dce3_2_afmt_write_sad_regs(encoder);
+ }
+
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
@@ -382,7 +488,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct r600_audio audio = r600_audio_status(rdev);
+ struct r600_audio_pin audio = r600_audio_status(rdev);
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
@@ -491,6 +597,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable)
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = NULL;
+
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7c780839a7f..454f90a849e 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -960,6 +960,42 @@
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
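
(Illustration, not part of the patch: packing one audio descriptor with
these fields, mirroring dce3_2_afmt_write_sad_regs() above. A PCM SAD for
8 channels at 32/44.1/48 kHz would be:)

    /* MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07) == 0x0707;
     * the PCM case also ORs in SUPPORTED_FREQUENCIES_STEREO(0x07),
     * giving 0x07000707. */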
+
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9f19259667d..ff8b564ce2b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,47 @@ extern int radeon_aspm;
#define RADEON_RESET_MC (1 << 10)
#define RADEON_RESET_DISPLAY (1 << 11)
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX (1 << 0)
+#define RADEON_CG_BLOCK_MC (1 << 1)
+#define RADEON_CG_BLOCK_SDMA (1 << 2)
+#define RADEON_CG_BLOCK_UVD (1 << 3)
+#define RADEON_CG_BLOCK_VCE (1 << 4)
+#define RADEON_CG_BLOCK_HDP (1 << 5)
+#define RADEON_CG_BLOCK_BIF (1 << 6)
+
+/* CG flags */
+#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
+#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
+#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
+#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
+#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
+#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
+#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
+#define RADEON_PG_SUPPORT_UVD (1 << 3)
+#define RADEON_PG_SUPPORT_VCE (1 << 4)
+#define RADEON_PG_SUPPORT_CP (1 << 5)
+#define RADEON_PG_SUPPORT_GDS (1 << 6)
+#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define RADEON_PG_SUPPORT_SDMA (1 << 8)
+#define RADEON_PG_SUPPORT_ACP (1 << 9)
+#define RADEON_PG_SUPPORT_SAMU (1 << 10)
+
/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
@@ -238,6 +279,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
u16 *voltage,
u16 leakage_idx);
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id);
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -492,9 +539,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
/*
* Semaphores.
@@ -682,7 +726,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
struct radeon_irq {
bool installed;
@@ -746,8 +790,6 @@ struct radeon_ring {
uint32_t align_mask;
uint32_t ptr_mask;
bool ready;
- u32 ptr_reg_shift;
- u32 ptr_reg_mask;
u32 nop;
u32 idx;
u64 last_semaphore_signal_addr;
@@ -844,35 +886,6 @@ struct r600_ih {
bool enabled;
};
-struct r600_blit_cp_primitives {
- void (*set_render_target)(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr);
- void (*cp_set_surface_sync)(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr);
- void (*set_shaders)(struct radeon_device *rdev);
- void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
- void (*set_tex_resource)(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size);
- void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2);
- void (*draw_auto)(struct radeon_device *rdev);
- void (*set_default_state)(struct radeon_device *rdev);
-};
-
-struct r600_blit {
- struct radeon_bo *shader_obj;
- struct r600_blit_cp_primitives primitives;
- int max_dim;
- int ring_size_common;
- int ring_size_per_loop;
- u64 shader_gpu_addr;
- u32 vs_offset, ps_offset;
- u32 state_offset;
- u32 state_len;
-};
-
/*
* RLC stuff
*/
@@ -883,13 +896,19 @@ struct radeon_rlc {
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
volatile uint32_t *sr_ptr;
- u32 *reg_list;
+ const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
volatile uint32_t *cs_ptr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct radeon_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -921,8 +940,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -1036,7 +1054,6 @@ struct radeon_wb {
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
-#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
@@ -1147,6 +1164,7 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_SI,
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
};
struct radeon_voltage {
@@ -1220,6 +1238,9 @@ struct radeon_ps {
/* UVD clocks */
u32 vclk;
u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
/* asic priv */
void *ps_priv;
};
@@ -1270,14 +1291,21 @@ struct radeon_clock_voltage_dependency_table {
struct radeon_clock_voltage_dependency_entry *entries;
};
-struct radeon_cac_leakage_entry {
- u16 vddc;
- u32 leakage;
+union radeon_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
};
struct radeon_cac_leakage_table {
u32 count;
- struct radeon_cac_leakage_entry *entries;
+ union radeon_cac_leakage_entry *entries;
};
struct radeon_phase_shedding_limits_entry {
@@ -1291,6 +1319,28 @@ struct radeon_phase_shedding_limits_table {
struct radeon_phase_shedding_limits_entry *entries;
};
+struct radeon_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct radeon_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct radeon_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_vce_clock_voltage_dependency_entry *entries;
+};
+
struct radeon_ppm_table {
u8 ppm_design;
u16 cpu_core_number;
@@ -1304,11 +1354,27 @@ struct radeon_ppm_table {
u32 tj_max;
};
+struct radeon_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
struct radeon_dpm_dynamic_state {
struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
struct radeon_clock_array valid_sclk_values;
struct radeon_clock_array valid_mclk_values;
struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
@@ -1320,6 +1386,7 @@ struct radeon_dpm_dynamic_state {
struct radeon_cac_leakage_table cac_leakage_table;
struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
struct radeon_ppm_table *ppm_table;
+ struct radeon_cac_tdp_table *cac_tdp_table;
};
struct radeon_dpm_fan {
@@ -1389,11 +1456,12 @@ struct radeon_dpm {
struct radeon_dpm_thermal thermal;
/* forced levels */
enum radeon_dpm_forced_level forced_level;
+ /* track UVD streams */
+ unsigned sd;
+ unsigned hd;
};
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state);
-
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
struct radeon_pm {
struct mutex mutex;
@@ -1470,6 +1538,7 @@ struct radeon_uvd {
void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ unsigned img_size[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
};
@@ -1498,12 +1567,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
unsigned cg_upll_func_cntl);
-struct r600_audio {
+struct r600_audio_pin {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
+ u32 offset;
+ bool connected;
+ u32 id;
+};
+
+struct r600_audio {
+ bool enabled;
+ struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
+ int num_pins;
};
/*
@@ -1535,6 +1613,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
+/*
+ * ASIC ring specific functions.
+ */
+struct radeon_asic_ring {
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+
+ /* validating and patching of IBs */
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+
+	/* command emit functions */
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+ /* testing functions */
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+
+ /* deprecated */
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+};
/*
* ASIC specific functions.
@@ -1578,23 +1684,7 @@ struct radeon_asic {
uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
- struct {
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- int (*cs_parse)(struct radeon_cs_parser *p);
- void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
- void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-
- u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- } ring[RADEON_NUM_RINGS];
+ struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
@@ -1687,6 +1777,7 @@ struct radeon_asic {
void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
+ void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
} dpm;
/* pageflipping */
struct {
@@ -2066,7 +2157,6 @@ struct radeon_device {
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
- struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2077,9 +2167,8 @@ struct radeon_device {
struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- bool audio_enabled;
bool has_uvd;
- struct r600_audio audio_status; /* audio stuff */
+ struct r600_audio audio; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -2097,6 +2186,9 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
+ /* clock, powergating flags */
+ u32 cg_flags;
+ u32 pg_flags;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2155,6 +2247,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -2286,6 +2380,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
WREG32(R600_UVD_CTX_DATA, (v));
}
+
+static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+ u32 r;
+
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ r = RREG32(CIK_DIDT_IND_DATA);
+ return r;
+}
+
+static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ WREG32(CIK_DIDT_IND_DATA, (v));
+}
+
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2381,7 +2491,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
@@ -2389,16 +2499,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
-#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
-#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
-#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
+#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
+#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -2406,8 +2516,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
@@ -2458,6 +2568,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
+#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
/* Common functions */
/* AGP */
@@ -2524,6 +2635,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
/*
* R600 vram scratch functions
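/*
 * Aside (illustrative sketch, not part of the patch): the macro changes
 * above follow from struct radeon_asic's ring table becoming an array of
 * pointers, roughly:
 *
 *	struct radeon_asic {
 *		...
 *		struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
 *		...
 *	};
 *
 * where each slot previously held an embedded struct radeon_asic_ring.
 * That is why every accessor switches from ring[(r)].callback to
 * ring[(r)]->callback, and why the radeon_asic.c hunks below can point
 * several ring slots (and several ASIC tables) at a single shared static
 * ring definition instead of duplicating it per ASIC.
 */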
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f8f8b3113dd..630853b9684 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev)
/*
* ASIC
*/
+
+static struct radeon_asic_ring r100_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
@@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = {
},
};
+static struct radeon_asic_ring r300_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = {
},
};
+static struct radeon_asic_ring r600_gfx_ring = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring r600_dma_ring = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
@@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1022,7 +935,7 @@ static struct radeon_asic r600_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1115,7 +1006,7 @@ static struct radeon_asic rv6xx_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1187,30 +1078,8 @@ static struct radeon_asic rs780_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1226,7 +1095,7 @@ static struct radeon_asic rs780_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1280,6 +1149,19 @@ static struct radeon_asic rs780_asic = {
},
};
+static struct radeon_asic_ring rv770_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
@@ -1297,42 +1179,9 @@ static struct radeon_asic rv770_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1348,7 +1197,7 @@ static struct radeon_asic rv770_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1405,6 +1254,32 @@ static struct radeon_asic rv770_asic = {
},
};
+static struct radeon_asic_ring evergreen_gfx_ring = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring evergreen_dma_ring = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
@@ -1422,42 +1297,9 @@ static struct radeon_asic evergreen_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1473,7 +1315,7 @@ static struct radeon_asic evergreen_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1547,42 +1389,9 @@ static struct radeon_asic sumo_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1598,7 +1407,7 @@ static struct radeon_asic sumo_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1671,42 +1480,9 @@ static struct radeon_asic btc_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1722,7 +1498,7 @@ static struct radeon_asic btc_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1779,6 +1555,49 @@ static struct radeon_asic btc_asic = {
},
};
+static struct radeon_asic_ring cayman_gfx_ring = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring cayman_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
+static struct radeon_asic_ring cayman_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v3_1_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
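/*
 * Aside (not part of the patch): the UVD callbacks are now supplied by
 * versioned implementations -- uvd_v1_0_* for the common ring handling,
 * uvd_v2_2_fence_emit for RV770-and-newer fences, and
 * uvd_v3_1_semaphore_emit from Cayman onwards -- replacing the old
 * r600_uvd_* entry points whose declarations are removed from
 * radeon_asic.h further below.
 */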
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1802,88 +1621,12 @@ static struct radeon_asic cayman_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1899,7 +1642,7 @@ static struct radeon_asic cayman_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1979,88 +1722,12 @@ static struct radeon_asic trinity_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -2072,9 +1739,11 @@ static struct radeon_asic trinity_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2130,6 +1799,36 @@ static struct radeon_asic trinity_asic = {
},
};
+static struct radeon_asic_ring si_gfx_ring = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring si_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &si_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
@@ -2153,88 +1852,12 @@ static struct radeon_asic si_asic = {
.set_page = &si_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &si_irq_set,
@@ -2246,6 +1869,8 @@ static struct radeon_asic si_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2305,6 +1930,51 @@ static struct radeon_asic si_asic = {
},
};
+static struct radeon_asic_ring ci_gfx_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_gfx_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring ci_cp_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_compute_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &cik_compute_ring_get_rptr,
+ .get_wptr = &cik_compute_ring_get_wptr,
+ .set_wptr = &cik_compute_ring_set_wptr,
+};
+
+static struct radeon_asic_ring ci_dma_ring = {
+ .ib_execute = &cik_sdma_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_sdma_fence_ring_emit,
+ .emit_semaphore = &cik_sdma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_sdma_ring_test,
+ .ib_test = &cik_sdma_ib_test,
+ .is_lockup = &cik_sdma_is_lockup,
+ .vm_flush = &cik_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic ci_asic = {
.init = &cik_init,
.fini = &cik_fini,
@@ -2328,88 +1998,12 @@ static struct radeon_asic ci_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2419,6 +2013,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2452,6 +2048,25 @@ static struct radeon_asic ci_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &ci_get_temp,
+ },
+ .dpm = {
+ .init = &ci_dpm_init,
+ .setup_asic = &ci_dpm_setup_asic,
+ .enable = &ci_dpm_enable,
+ .disable = &ci_dpm_disable,
+ .pre_set_power_state = &ci_dpm_pre_set_power_state,
+ .set_power_state = &ci_dpm_set_power_state,
+ .post_set_power_state = &ci_dpm_post_set_power_state,
+ .display_configuration_changed = &ci_dpm_display_configuration_changed,
+ .fini = &ci_dpm_fini,
+ .get_sclk = &ci_dpm_get_sclk,
+ .get_mclk = &ci_dpm_get_mclk,
+ .print_power_state = &ci_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &ci_dpm_force_performance_level,
+ .vblank_too_short = &ci_dpm_vblank_too_short,
+ .powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2483,88 +2098,12 @@ static struct radeon_asic kv_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2574,6 +2113,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2607,6 +2148,24 @@ static struct radeon_asic kv_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &kv_get_temp,
+ },
+ .dpm = {
+ .init = &kv_dpm_init,
+ .setup_asic = &kv_dpm_setup_asic,
+ .enable = &kv_dpm_enable,
+ .disable = &kv_dpm_disable,
+ .pre_set_power_state = &kv_dpm_pre_set_power_state,
+ .set_power_state = &kv_dpm_set_power_state,
+ .post_set_power_state = &kv_dpm_post_set_power_state,
+ .display_configuration_changed = &kv_dpm_display_configuration_changed,
+ .fini = &kv_dpm_fini,
+ .get_sclk = &kv_dpm_get_sclk,
+ .get_mclk = &kv_dpm_get_mclk,
+ .print_power_state = &kv_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &kv_dpm_force_performance_level,
+ .powergate_uvd = &kv_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2776,19 +2335,188 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->has_uvd = false;
else
rdev->has_uvd = true;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_PITCAIRN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_VERDE:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0 |
+ /*RADEON_PG_SUPPORT_GFX_CG | */
+ RADEON_PG_SUPPORT_SDMA;
+ break;
+ case CHIP_OLAND:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_HAINAN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ default:
+ rdev->cg_flags = 0;
+ rdev->pg_flags = 0;
+ break;
+ }
break;
case CHIP_BONAIRE:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
+ rdev->has_uvd = true;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
break;
case CHIP_KAVERI:
case CHIP_KABINI:
rdev->asic = &kv_asic;
/* set num crtcs */
- if (rdev->family == CHIP_KAVERI)
+ if (rdev->family == CHIP_KAVERI) {
rdev->num_crtc = 4;
- else
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_GFX_DMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_ACP |
+ RADEON_PG_SUPPORT_SAMU;*/
+ } else {
rdev->num_crtc = 2;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_SAMU;*/
+ }
+ rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
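/*
 * Aside (illustrative sketch, not part of the patch): the cg_flags and
 * pg_flags set above are per-family capability bitmasks; other driver
 * code can gate clock- and powergating features on them along the lines
 * of
 *
 *	if (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)
 *		cik_enable_mc_mgcg(rdev, true);
 *
 * (cik_enable_mc_mgcg is a hypothetical helper name, not taken from this
 * patch.)  The commented-out RADEON_CG_SUPPORT_GFX_CGCG and
 * RADEON_PG_SUPPORT_* entries mark support that is wired up but
 * deliberately left disabled for now.
 */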
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d61d5aac18..818bbe6b884 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -336,10 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -371,8 +367,6 @@ int r600_count_pipe_bits(uint32_t val);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
-int r600_blit_init(struct radeon_device *rdev);
-void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
@@ -385,28 +379,25 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio r600_audio_status(struct radeon_device *rdev);
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+/* r600 dma */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/* rv6xx dpm */
int rv6xx_dpm_init(struct radeon_device *rdev);
int rv6xx_dpm_enable(struct radeon_device *rdev);
@@ -438,19 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
-/* uvd */
-int r600_uvd_init(struct radeon_device *rdev);
-int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_stop(struct radeon_device *rdev);
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-
/*
* rv770,rv730,rv710,rv740
*/
@@ -468,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
-int rv770_uvd_resume(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
/* rv7xx pm */
@@ -530,7 +507,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -652,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev,
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
+int dce6_audio_init(struct radeon_device *rdev);
+void dce6_audio_fini(struct radeon_device *rdev);
/*
* si
@@ -712,7 +690,6 @@ u32 cik_get_xclk(struct radeon_device *rdev);
uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int cik_uvd_resume(struct radeon_device *rdev);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -763,5 +740,81 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int ci_get_temp(struct radeon_device *rdev);
+int kv_get_temp(struct radeon_device *rdev);
+
+int ci_dpm_init(struct radeon_device *rdev);
+int ci_dpm_enable(struct radeon_device *rdev);
+void ci_dpm_disable(struct radeon_device *rdev);
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ci_dpm_set_power_state(struct radeon_device *rdev);
+void ci_dpm_post_set_power_state(struct radeon_device *rdev);
+void ci_dpm_setup_asic(struct radeon_device *rdev);
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
+void ci_dpm_fini(struct radeon_device *rdev);
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+int kv_dpm_init(struct radeon_device *rdev);
+int kv_dpm_enable(struct radeon_device *rdev);
+void kv_dpm_disable(struct radeon_device *rdev);
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
+int kv_dpm_set_power_state(struct radeon_device *rdev);
+void kv_dpm_post_set_power_state(struct radeon_device *rdev);
+void kv_dpm_setup_asic(struct radeon_device *rdev);
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
+void kv_dpm_fini(struct radeon_device *rdev);
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+/* uvd v1.0 */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
+int uvd_v1_0_init(struct radeon_device *rdev);
+void uvd_v1_0_fini(struct radeon_device *rdev);
+int uvd_v1_0_start(struct radeon_device *rdev);
+void uvd_v1_0_stop(struct radeon_device *rdev);
+
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/* uvd v2.2 */
+int uvd_v2_2_resume(struct radeon_device *rdev);
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+
+/* uvd v3.1 */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+
+/* uvd v4.2 */
+int uvd_v4_2_resume(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4ccd61f60eb..404e25d285b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
@@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
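
The two hunks above (and the matching ones in radeon_atombios_i2c_init and radeon_lookup_gpio below) stop indexing the ATOM tables as C arrays and instead advance a byte pointer by the entry's on-disk size. The BIOS tables are packed arrays whose element count comes from the table header but are declared as one-element arrays, so explicit stride arithmetic is the safe way to walk them. A minimal sketch of the pattern, with hypothetical names:

/* Walk a packed BIOS table whose entry count is derived from the
 * header; entry_size is the on-disk stride. Names here are
 * illustrative, not the driver's. */
static void walk_packed_table(const u8 *table, u16 table_size,
			      size_t header_size, size_t entry_size,
			      void (*handle)(const u8 *entry))
{
	const u8 *entry = table + header_size;
	unsigned int i;
	unsigned int count = (table_size - header_size) / entry_size;

	for (i = 0; i < count; i++) {
		handle(entry);
		entry += entry_size;	/* advance by the real stride */
	}
}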
@@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
-
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -206,12 +207,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
}
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ pin = gpio_info->asGPIO_Pin;
for (i = 0; i < num_indices; i++) {
- pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
@@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
gpio.valid = true;
break;
}
+ pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
+ ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
}
}
@@ -711,13 +716,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ u8 *num_dst_objs = (u8 *)
+ ((u8 *)router_src_dst_table + 1 +
+ (router_src_dst_table->ucNumberOfSrc * 2));
+ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
int enum_id;
router.router_id = router_obj_id;
- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
- enum_id++) {
+ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ le16_to_cpu(dst_objs[enum_id]))
break;
}
@@ -1480,6 +1488,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
uint8_t frev, crev;
int i, num_indices;
+ if (id == ASIC_INTERNAL_MEMORY_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
+ return false;
+ }
+ if (id == ASIC_INTERNAL_ENGINE_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
+ return false;
+ }
+
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
@@ -1672,7 +1689,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
kfree(edid);
}
}
- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ record += fake_edid_record->ucFakeEDIDLength ?
+ fake_edid_record->ucFakeEDIDLength + 2 :
+ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
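
The fake-EDID hunk above fixes the record walker's stride: a populated ATOM_FAKE_EDID_PATCH_RECORD carries its EDID inline, so the next record starts ucFakeEDIDLength plus two header bytes further on, not sizeof() further on. As a worked example under that layout assumption, a record holding a 128-byte EDID spans 130 bytes, so stepping by the nominal struct size would land the parser inside the EDID payload. The stride in isolation:

/* Stride of a variable-length record, assuming the layout
 * { u8 type; u8 len; u8 payload[len]; }; empty records keep the
 * nominal struct size. */
static size_t fake_edid_record_size(u8 len, size_t nominal)
{
	return len ? (size_t)len + 2 : nominal;
}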
@@ -2237,6 +2256,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
@@ -3077,6 +3101,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev
return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 3:
+ case 4:
+ args.v3.ucVoltageType = 0;
+ args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+ args.v3.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ int i, j;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ switch (crev) {
+ case 1:
+ if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
+ return -EINVAL;
+ leakage_bin = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usLeakageBinArrayOffset));
+ vddc_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
+ vddc_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
+ vddci_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
+ vddci_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
+
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
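
radeon_atom_get_leakage_vddc_based_on_leakage_params() added above is a two-level lookup: find the column whose ELB id matches the virtual voltage id, then scan the ascending leakage bins for the first one at or above the VBIOS leakage id, and read the level out of a row-major [bin][id] table. A self-contained sketch of that shape, with hypothetical names:

/* levels[] is row-major: levels[row * num_ids + col]. */
static u16 lookup_leakage_level(const u16 *ids, unsigned int num_ids,
				const u16 *bins, unsigned int num_bins,
				const u16 *levels, u16 virt_id, u16 leak_id)
{
	unsigned int col, row;

	for (col = 0; col < num_ids; col++) {
		if (ids[col] != virt_id)
			continue;
		for (row = 0; row < num_bins; row++) {
			if (leak_id <= bins[row])
				return levels[row * num_ids + col];
		}
		break;
	}
	return 0;	/* no match */
}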
@@ -3279,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev,
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
if (formula->ucNumOfVoltageEntries) {
+ VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)&formula->asVIDAdjustEntries[0] +
+ (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
*max_voltage =
- le16_to_cpu(formula->asVIDAdjustEntries[
- formula->ucNumOfVoltageEntries - 1
- ].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
return 0;
}
}
@@ -3442,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
+ VOLTAGE_LUT_ENTRY *lut;
if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &formula->asVIDAdjustEntries[0];
for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
ret = radeon_atom_get_voltage_gpio_settings(rdev,
voltage_table->entries[i].value,
voltage_type,
@@ -3454,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
&voltage_table->mask_low);
if (ret)
return ret;
+ lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
}
voltage_table->count = formula->ucNumOfVoltageEntries;
return 0;
@@ -3473,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
&voltage_object->v3.asGpioVoltageObj;
+ VOLTAGE_LUT_ENTRY_V2 *lut;
if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &gpio->asVolGpioLut[0];
for (i = 0; i < gpio->ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
voltage_table->entries[i].smio_low =
- le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId);
+ le32_to_cpu(lut->ulVoltageId);
+ lut = (VOLTAGE_LUT_ENTRY_V2 *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
}
voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
voltage_table->count = gpio->ucGpioEntryNum;
@@ -3605,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
union vram_info *vram_info;
u32 mem_timing_size = gddr5 ?
sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
- u8 *p;
memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
@@ -3624,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+ ATOM_MEMORY_TIMING_FORMAT *format;
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
@@ -3634,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
mclk_range_table->num_entries = (u8)
((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
mem_timing_size);
- p = (u8 *)&vram_module->asMemTiming[0];
+ format = &vram_module->asMemTiming[0];
for (i = 0; i < mclk_range_table->num_entries; i++) {
- ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
- p += mem_timing_size;
+ format = (ATOM_MEMORY_TIMING_FORMAT *)
+ ((u8 *)format + mem_timing_size);
}
} else
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72c9d2..00000000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- * Copyright 2012 Alcatel-Lucent, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __RADEON_BLIT_COMMON_H__
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
-
-#define RECT_UNIT_H 32
-#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
-
-#define __RADEON_BLIT_COMMON_H__
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef..3cae2bbc185 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
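
drm_order() is dropped here in favour of the generic order_base_2() from <linux/log2.h>, which returns log2 of the value rounded up to the next power of two (order_base_2(512) == 9, order_base_2(513) == 10). The sizes passed above are powers of two, so the conversion is behaviour-preserving; a minimal restatement:

#include <linux/log2.h>

/* size_l2qw: the ring size expressed as log2 of its quadword count. */
static inline u32 ring_size_l2qw(u32 ring_size_bytes)
{
	return order_base_2(ring_size_bytes / 8);	/* bytes -> qwords */
}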
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 13a130fb351..a5608441037 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
@@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
+
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
@@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_alloc_pt(rdev, vm);
@@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
- /* XXX pick SD/HD/MVC */
- if (parser.ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_note_usage(rdev);
-
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 63398ae1dbf..16cb8792b1e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vram_limit = 0;
}
+ if (radeon_gart_size == -1) {
+ /* default to a larger gart size on newer asics */
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
+ }
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
- dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- radeon_gart_size = 512;
-
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- radeon_gart_size = 512;
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
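
radeon_gart_size == -1 now means "auto", resolved to 1024M on RV770 and newer and 512M otherwise; the same per-family default doubles as the fallback when the requested size is under 32M or not a power of two. radeon_check_pot_argument() is assumed to be the usual bit trick:

/* Power-of-two test the fallback path relies on (assumed form). */
static bool radeon_check_pot(int arg)
{
	return arg > 0 && (arg & (arg - 1)) == 0;
}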
@@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.gtt_size = 512 * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1270,7 +1282,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
r = radeon_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c2b67b4e1ac..b055bddaa94 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1254,41 +1255,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
- if (ASIC_IS_DCE6(rdev)) {
- /* todo */
+ if (ASIC_IS_NODCE(rdev)) {
+ /* nothing to do */
} else if (ASIC_IS_DCE4(rdev)) {
+ static uint32_t eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ 0x13830 - 0x7030,
+ };
+ int num_afmt;
+
+ /* DCE8 has 7 audio blocks tied to DIG encoders */
+ /* DCE6 has 6 audio blocks tied to DIG encoders */
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
- rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[0]) {
- rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
- rdev->mode_info.afmt[0]->id = 0;
- }
- rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[1]) {
- rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
- rdev->mode_info.afmt[1]->id = 1;
- }
- if (!ASIC_IS_DCE41(rdev)) {
- rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[2]) {
- rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
- rdev->mode_info.afmt[2]->id = 2;
- }
- rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[3]) {
- rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
- rdev->mode_info.afmt[3]->id = 3;
- }
- rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[4]) {
- rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
- rdev->mode_info.afmt[4]->id = 4;
- }
- rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[5]) {
- rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
- rdev->mode_info.afmt[5]->id = 5;
+ if (ASIC_IS_DCE8(rdev))
+ num_afmt = 7;
+ else if (ASIC_IS_DCE6(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE5(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE41(rdev))
+ num_afmt = 2;
+ else /* DCE4 */
+ num_afmt = 6;
+
+ BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
+ for (i = 0; i < num_afmt; i++) {
+ rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[i]) {
+ rdev->mode_info.afmt[i]->offset = eg_offsets[i];
+ rdev->mode_info.afmt[i]->id = i;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
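
The afmt rework above replaces six copy-pasted allocations with an offset table and a loop, which also lets DCE6 and DCE8 (6 and 7 audio blocks respectively) share the path that used to be a /* todo */ stub; the odd `0x13830 - 0x7030` entry expresses the seventh DCE8 block relative to the same CRTC0 register base the other offsets use. The table-driven pattern in isolation, with illustrative types and names:

/* One offsets[] table plus a per-ASIC count replaces N copies of
 * the same kzalloc (requires <linux/slab.h>). */
struct blk { u32 offset; int id; };

static void init_blocks(struct blk *blocks[], const u32 offsets[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		blocks[i] = kzalloc(sizeof(*blocks[i]), GFP_KERNEL);
		if (blocks[i]) {
			blocks[i]->offset = offsets[i];
			blocks[i]->id = i;
		}
	}
}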
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8e..cb4445f55a9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,7 +81,6 @@
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
-int radeon_driver_firstopen_kms(struct drm_device *dev);
void radeon_driver_lastclose_kms(struct drm_device *dev);
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
-extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
size_t size,
@@ -154,7 +148,7 @@ int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
-int radeon_gart_size = 512; /* default gart size */
+int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
@@ -187,7 +181,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
static struct drm_driver driver_old = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME,
+ DRIVER_USE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME | DRIVER_RENDER,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
- .firstopen = radeon_driver_firstopen_kms,
.open = radeon_driver_open_kms,
.preclose = radeon_driver_preclose_kms,
.postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
- .dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
- .dumb_destroy = radeon_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab6..dce99c8a583 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 081886b0642..cc9e8482cf3 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+ INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
+
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev);
if (r) {
rdev->irq.installed = false;
+ flush_work(&rdev->hotplug_work);
return r;
}
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
-
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
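
The reordering above closes a startup race: once drm_irq_install() returns, an interrupt can fire immediately and the handler may schedule hotplug, audio or reset work, so the work items must be initialized first; the error path flushes hotplug work in case the handler already ran before the install was unwound. The general shape, with illustrative names:

/* Make everything the IRQ handler can touch valid before the
 * interrupt can fire, and unwind in reverse on failure. */
INIT_WORK(&dev->hotplug_work, hotplug_fn);
r = install_irq(dev);
if (r) {
	flush_work(&dev->hotplug_work);	/* reap anything already queued */
	return r;
}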
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a610..61580ddc4eb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_SI_CP_DMA_COMPUTE:
+ *value = 1;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -449,19 +452,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
/**
- * radeon_driver_firstopen_kms - drm callback for first open
- *
- * @dev: drm dev pointer
- *
- * Nothing to be done for KMS (all asics).
- * Returns 0 on success.
- */
-int radeon_driver_firstopen_kms(struct drm_device *dev)
-{
- return 0;
-}
-
-/**
 * radeon_driver_lastclose_kms - drm callback for last close
*
* @dev: drm dev pointer
@@ -683,16 +673,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
drmcrtc);
}
-/*
- * IOCTL.
- */
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Not valid in KMS. */
- return -EINVAL;
-}
-
#define KMS_INVALID_IOCTL(name) \
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{ \
@@ -732,7 +712,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)
-struct drm_ioctl_desc radeon_ioctls_kms[] = {
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -761,18 +741,18 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8296632a423..d908d8d68f6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -225,6 +225,7 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
+ struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -233,7 +234,7 @@ struct radeon_mode_info {
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[6];
+ struct radeon_afmt *afmt[7];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2020bf4a383..c0fa4aa9cee 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
return r;
}
bo->rdev = rdev;
- bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c48001..209b1115026 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
* @bo: radeon object for which we query the offset
*
* Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
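
The mmap offset switches from TTM's cached addr_space_offset to the common VMA offset manager; drm_vma_node_offset_addr() is expected to reduce to the node's page-granular start scaled to bytes, roughly:

/* Assumed reduction of drm_vma_node_offset_addr(): the vma-manager
 * node stores a page-based start, the mmap offset is it in bytes. */
static inline u64 node_mmap_offset(u64 start_page)
{
	return start_page << PAGE_SHIFT;
}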
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c557850cd34..d7555369a3e 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
@@ -624,7 +626,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
/* switch back the user state */
dpm_state = rdev->pm.dpm.user_state;
}
- radeon_dpm_enable_power_state(rdev, dpm_state);
+ mutex_lock(&rdev->pm.mutex);
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ rdev->pm.dpm.thermal_active = true;
+ else
+ rdev->pm.dpm.thermal_active = false;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+
+ radeon_pm_compute_clocks(rdev);
}
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
@@ -687,7 +697,10 @@ restart_search:
break;
/* internal states */
case POWER_STATE_TYPE_INTERNAL_UVD:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps)
+ return rdev->pm.dpm.uvd_ps;
+ else
+ break;
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return ps;
@@ -729,10 +742,17 @@ restart_search:
/* use a fallback state if we didn't match */
switch (dpm_state) {
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps) {
+ return rdev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
case POWER_STATE_TYPE_INTERNAL_THERMAL:
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
goto restart_search;
@@ -850,38 +870,51 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
+ /* force low perf level for thermal */
+ if (rdev->pm.dpm.thermal_active &&
+ rdev->asic->dpm.force_performance_level) {
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ }
+
done:
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
- if (!rdev->pm.dpm_enabled)
- return;
+ enum radeon_pm_state_type dpm_state;
- mutex_lock(&rdev->pm.mutex);
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- rdev->pm.dpm.thermal_active = true;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD:
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- rdev->pm.dpm.uvd_active = true;
- break;
- default:
- rdev->pm.dpm.thermal_active = false;
- rdev->pm.dpm.uvd_active = false;
- break;
+ if (rdev->asic->dpm.powergate_uvd) {
+ mutex_lock(&rdev->pm.mutex);
+ /* enable/disable UVD */
+ radeon_dpm_powergate_uvd(rdev, !enable);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = true;
+ if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+ else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+ else
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = false;
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ radeon_pm_compute_clocks(rdev);
}
- rdev->pm.dpm.state = dpm_state;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
}
static void radeon_pm_suspend_old(struct radeon_device *rdev)
@@ -1176,6 +1209,9 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 65b9eabd5a2..20074560fc2 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
return ERR_PTR(ret);
- bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fb5ea620897..46a25f037b8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -363,11 +363,10 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
{
u32 rptr;
- if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+ if (rdev->wb.enabled)
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
u32 wptr;
wptr = RREG32(ring->wptr_reg);
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ WREG32(ring->wptr_reg, ring->wptr);
(void)RREG32(ring->wptr_reg);
}
@@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
- * @ptr_reg_shift: bit offset of the rptr/wptr values
- * @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
{
int r;
@@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
- ring->ptr_reg_shift = ptr_reg_shift;
- ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6c0ce8915fa..71245d6f34a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
static void radeon_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index d8b05f7bcf1..33858364fe8 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -35,6 +35,12 @@
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
+#define CIK_PFP_UCODE_SIZE 2144
+#define CIK_ME_UCODE_SIZE 2144
+#define CIK_CE_UCODE_SIZE 2144
+
+/* MEC */
+#define CIK_MEC_UCODE_SIZE 4192
/* RLC */
#define R600_RLC_UCODE_SIZE 768
@@ -43,12 +49,20 @@
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
#define SI_RLC_UCODE_SIZE 2048
+#define BONAIRE_RLC_UCODE_SIZE 2048
+#define KB_RLC_UCODE_SIZE 2560
+#define KV_RLC_UCODE_SIZE 2560
/* MC */
#define BTC_MC_UCODE_SIZE 6024
#define CAYMAN_MC_UCODE_SIZE 6037
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
+#define CIK_MC_UCODE_SIZE 7866
+
+/* SDMA */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
/* SMC */
#define RV770_SMC_UCODE_START 0x0100
@@ -126,4 +140,7 @@
#define HAINAN_SMC_UCODE_START 0x10000
#define HAINAN_SMC_UCODE_SIZE 0xe67C
+#define BONAIRE_SMC_UCODE_START 0x20000
+#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index b79f4f5cdd6..1a01bbff9bf 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -147,6 +147,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
+ rdev->uvd.img_size[i] = 0;
}
return 0;
@@ -347,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[])
{
int32_t *msg, msg_type, handle;
+ unsigned img_size = 0;
void *ptr;
int i, r;
@@ -383,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
if (msg_type == 1) {
/* it's a decode msg, calc buffer sizes */
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ /* calc image size (width * height) */
+ img_size = msg[6] * msg[7];
radeon_bo_kunmap(bo);
if (r)
return r;
@@ -394,6 +398,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
radeon_bo_kunmap(bo);
return 0;
} else {
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
if (msg_type != 0) {
@@ -414,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
@@ -733,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power management
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+ unsigned *sd, unsigned *hd)
+{
+ unsigned i;
+
+ *sd = 0;
+ *hd = 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_read(&rdev->uvd.handles[i]))
+ continue;
+
+ if (rdev->uvd.img_size[i] >= 720*576)
+ ++(*hd);
+ else
+ ++(*sd);
+ }
+}
+
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
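
radeon_uvd_count_handles() above bins every open stream by the image size recorded when its session was created: anything at or above 720*576 pixels (a full PAL frame, 414720 px) counts as HD, so 1920*1080 lands in *hd and 720*480 in *sd. Restated in isolation:

/* Assumed HD/SD split behind the DPM hint above. */
static bool uvd_stream_is_hd(unsigned int width, unsigned int height)
{
	return width * height >= 720 * 576;	/* PAL-size or larger */
}

radeon_dpm_enable_uvd() in radeon_pm.c then maps the (sd, hd) pair onto the UVD_SD/UVD_HD/UVD_HD2 internal power states.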
@@ -740,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.uvd_active = false;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+ radeon_dpm_enable_uvd(rdev, false);
} else {
radeon_set_uvd_clocks(rdev, 0, 0);
}
@@ -755,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
+ bool streams_changed = false;
bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
- if (set_clocks) {
+
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ unsigned hd = 0, sd = 0;
+ radeon_uvd_count_handles(rdev, &sd, &hd);
+ if ((rdev->pm.dpm.sd != sd) ||
+ (rdev->pm.dpm.hd != hd)) {
+ rdev->pm.dpm.sd = sd;
+ rdev->pm.dpm.hd = hd;
+ streams_changed = true;
+ }
+ }
+
+ if (set_clocks || streams_changed) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- /* XXX pick SD/HD/MVC */
- radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+ radeon_dpm_enable_uvd(rdev, true);
} else {
radeon_set_uvd_clocks(rdev, 53300, 40000);
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9fa1f..b8074a8ec75 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
- WREG32_MC(RS480_MC_MISC_CNTL,
- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
} else {
- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
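
The rs400 hunk converts two blind writes of RS480_MC_MISC_CNTL into read-modify-write, so the enable bits are ORed into whatever the register already holds instead of clobbering it — the standard pattern for flags that share a control register:

/* Read-modify-write: set bits without disturbing the rest of the
 * register (register/bit names as in the hunk above). */
u32 tmp;

tmp = RREG32_MC(RS480_MC_MISC_CNTL);
tmp |= RS480_GART_INDEX_REG_EN;
WREG32_MC(RS480_MC_MISC_CNTL, tmp);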
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index bdd888b4db2..ab1f2016f21 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1918,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -1926,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.ps[i].ps_priv = ps;
rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv6xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f5e92cfcc14..9f5846743c9 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
-int rv770_uvd_resume(struct radeon_device *rdev)
-{
- uint64_t addr;
- uint32_t chip_id, size;
- int r;
-
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
-
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
-
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
-
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
-
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
-
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
-
- /* tell firmware which hardware it is running on */
- switch (rdev->family) {
- default:
- return -EINVAL;
- case CHIP_RV710:
- chip_id = 0x01000005;
- break;
- case CHIP_RV730:
- chip_id = 0x01000006;
- break;
- case CHIP_RV740:
- chip_id = 0x01000007;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- chip_id = 0x01000008;
- break;
- case CHIP_JUNIPER:
- chip_id = 0x01000009;
- break;
- case CHIP_REDWOOD:
- chip_id = 0x0100000a;
- break;
- case CHIP_CEDAR:
- chip_id = 0x0100000b;
- break;
- case CHIP_SUMO:
- case CHIP_SUMO2:
- chip_id = 0x0100000c;
- break;
- case CHIP_PALM:
- chip_id = 0x0100000e;
- break;
- case CHIP_CAYMAN:
- chip_id = 0x0100000f;
- break;
- case CHIP_BARTS:
- chip_id = 0x01000010;
- break;
- case CHIP_TURKS:
- chip_id = 0x01000011;
- break;
- case CHIP_CAICOS:
- chip_id = 0x01000012;
- break;
- case CHIP_TAHITI:
- chip_id = 0x01000014;
- break;
- case CHIP_VERDE:
- chip_id = 0x01000015;
- break;
- case CHIP_PITCAIRN:
- chip_id = 0x01000016;
- break;
- case CHIP_ARUBA:
- chip_id = 0x01000017;
- break;
- }
- WREG32(UVD_VCPU_CHIP_ID, chip_id);
-
- return 0;
-}
-
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -1747,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
-/**
- * rv770_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r7xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFF)
- cur_size_in_dw = 0xFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -1829,6 +1658,11 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
rv770_mc_program(rdev);
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -1839,10 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1852,12 +1682,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -1876,7 +1700,7 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -1905,14 +1729,14 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1929,12 +1753,11 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
@@ -1984,7 +1807,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2092,7 +1915,6 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2100,7 +1922,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 00000000000..f9b02e3d683
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
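/*
 * Editor's note (sketch, not part of the patch): each r7xx DMA COPY
 * packet moves at most 0xFFFF dwords and costs 5 ring dwords (header,
 * low src/dst addresses, high address bits).  The ring reservation
 * above is therefore num_loops * 5 plus a small fixed overhead for the
 * semaphore/fence.  Illustrative math, assuming a 4 KiB GPU page:
 */
#include <stdint.h>

#define SKETCH_GPU_PAGE_SHIFT 12

static unsigned r7xx_copy_ring_dwords(unsigned num_gpu_pages)
{
	uint32_t size_in_dw = (num_gpu_pages << SKETCH_GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = (size_in_dw + 0xFFFE) / 0xFFFF; /* DIV_ROUND_UP */

	return num_loops * 5 + 8;	/* matches the lock size above */
}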
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 094c67a29d0..8cbb85dae5a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv7xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -2517,8 +2518,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = 300;
- if (vblank_time < 300)
+ /* quirks */
+ /* ASUS K70AF */
+ if ((rdev->pdev->device == 0x9553) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0x1c42))
+ switch_limit = 200;
+
+ if (vblank_time < switch_limit)
return true;
else
return false;
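/*
 * Editor's note (sketch): the quirk above lowers the mclk-switch
 * vblank threshold from 300 us to 200 us for one ASUS K70AF board,
 * keyed on PCI subsystem IDs.  Should more boards need this, a small
 * quirk table is the usual pattern (hypothetical names):
 */
struct mclk_quirk {
	unsigned short device, sub_vendor, sub_device;
	unsigned int switch_limit_us;
};

static const struct mclk_quirk mclk_quirks[] = {
	{ 0x9553, 0x1043, 0x1c42, 200 },	/* ASUS K70AF */
};

static unsigned int mclk_switch_limit(unsigned short dev,
				      unsigned short sub_ven,
				      unsigned short sub_dev)
{
	unsigned int i;

	for (i = 0; i < sizeof(mclk_quirks) / sizeof(mclk_quirks[0]); i++)
		if (mclk_quirks[i].device == dev &&
		    mclk_quirks[i].sub_vendor == sub_ven &&
		    mclk_quirks[i].sub_device == sub_dev)
			return mclk_quirks[i].switch_limit_us;
	return 300;	/* default, as in the code above */
}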
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6bef2b7d601..9fe60e54292 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -971,7 +971,21 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
/* UVD */
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+
#define UVD_LMI_EXT40_ADDR 0xf498
#define UVD_VCPU_CHIP_ID 0xf4d4
#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
@@ -985,4 +999,6 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CONTEXT_ID 0xf6f4
+
#endif
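/*
 * Editor's note (illustrative, not part of the patch): the PM4
 * PACKET0/PACKET3 macros added above compose 32-bit command headers:
 * packet type in bits 31:30, register offset or opcode in the low
 * bits, dword count in bits 29:16.  A self-contained check:
 */
#include <assert.h>

#define SKETCH_PACKET_TYPE3 3u
#define SKETCH_PACKET3(op, n) ((SKETCH_PACKET_TYPE3 << 30) |	\
			       (((op) & 0xFF) << 8) |		\
			       ((n) & 0x3FFF) << 16)

static void packet3_example(void)
{
	/* opcode 0x10 with one data dword following the header */
	assert(SKETCH_PACKET3(0x10, 1) == 0xC0011000u);
}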
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index daa8d2df8ec..3e23b757dcf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -76,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -1704,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1719,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
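/*
 * Editor's note (sketch): the added code programs the per-pipe DMIF
 * buffer allocation and then polls DMIF_BUFFERS_ALLOCATED_COMPLETED
 * with udelay(1), bounded by rdev->usec_timeout.  The poll pattern in
 * isolation, with a hypothetical register-read callback:
 */
#include <stdint.h>

static int poll_bit_set(uint32_t (*read_reg)(uint32_t), uint32_t reg,
			uint32_t bit, unsigned timeout_us)
{
	unsigned i;

	for (i = 0; i < timeout_us; i++) {
		if (read_reg(reg) & bit)
			return 0;	/* allocation completed */
		/* udelay(1) in the kernel; busy-wait elsewhere */
	}
	return -1;			/* timed out */
}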
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -3364,17 +3386,6 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
- WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
- SOFT_RESET_PA |
- SOFT_RESET_VGT |
- SOFT_RESET_SPI |
- SOFT_RESET_SX));
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3387,8 +3398,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
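/*
 * Editor's note: drm_order() is dropped in favour of the generic
 * kernel helper order_base_2() from <linux/log2.h>; both return the
 * rounded-up base-2 logarithm used for the ring-buffer size fields.
 * Portable sketch of the same computation:
 */
static unsigned order_base_2_sketch(unsigned long n)
{
	unsigned order = 0;

	while ((1UL << order) < n)
		order++;
	return order;	/* smallest order with 2^order >= n */
}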
@@ -3420,8 +3431,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3446,8 +3457,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3493,7 +3504,7 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3742,34 +3753,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * si_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = si_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
static void si_mc_program(struct radeon_device *rdev)
{
@@ -4083,13 +4066,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
return 0;
}
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+ u32 start_reg, reg, i;
+ u32 command = ib[idx + 4];
+ u32 info = ib[idx + 1];
+ u32 idx_value = ib[idx];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
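/*
 * Editor's note (sketch): the helper above hoists the CP_DMA register
 * checks that were duplicated in the gfx path, so the compute path can
 * share them (see the new PACKET3_CP_DMA case further down).  The
 * bitfields it inspects, pulled apart for clarity:
 */
#include <stdint.h>

struct cp_dma_fields {
	uint32_t byte_count;	/* command bits 20:0 */
	int src_is_reg;		/* info bits 30:29 == 0 while SAS is set */
	int dst_is_reg;		/* info bits 21:20 == 0 while DAS is set */
};

static struct cp_dma_fields cp_dma_decode(uint32_t info, uint32_t command)
{
	struct cp_dma_fields f;

	f.byte_count = command & 0x1fffff;
	f.src_is_reg = ((info >> 29) & 0x3) == 0;
	f.dst_is_reg = ((info >> 20) & 0x3) == 0;
	return f;
}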
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
- u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -4190,50 +4224,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
}
break;
case PACKET3_CP_DMA:
- command = ib[idx + 4];
- info = ib[idx + 1];
- if (command & PACKET3_CP_DMA_CMD_SAS) {
- /* src address space is register */
- if (((info & 0x60000000) >> 29) == 0) {
- start_reg = idx_value << 2;
- if (command & PACKET3_CP_DMA_CMD_SAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
- if (command & PACKET3_CP_DMA_CMD_DAS) {
- /* dst address space is register */
- if (((info & 0x00300000) >> 20) == 0) {
- start_reg = ib[idx + 2];
- if (command & PACKET3_CP_DMA_CMD_DAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -4245,6 +4238,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, reg, i;
@@ -4317,6 +4311,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
+ break;
default:
DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -4708,58 +4707,7 @@ void si_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+ si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
@@ -4806,32 +4754,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm->id < 8) {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- } else {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
- }
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
/*
* Power and clock gating
*/
@@ -4899,7 +4821,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
}
-static void si_init_uvd_internal_cg(struct radeon_device *rdev)
+void si_init_uvd_internal_cg(struct radeon_device *rdev)
{
bool hw_mode = true;
@@ -4942,7 +4864,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
u32 data, orig;
orig = data = RREG32(DMA_PG);
- if (enable)
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
data |= PG_CNTL_ENABLE;
else
data &= ~PG_CNTL_ENABLE;
@@ -4966,7 +4888,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5069,9 +4991,9 @@ static void si_enable_cgcg(struct radeon_device *rdev,
orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
- si_enable_gui_idle_interrupt(rdev, enable);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ si_enable_gui_idle_interrupt(rdev, true);
- if (enable) {
WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
tmp = si_halt_rlc(rdev);
@@ -5088,6 +5010,8 @@ static void si_enable_cgcg(struct radeon_device *rdev,
data |= CGCG_EN | CGLS_EN;
} else {
+ si_enable_gui_idle_interrupt(rdev, false);
+
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
@@ -5105,16 +5029,18 @@ static void si_enable_mgcg(struct radeon_device *rdev,
{
u32 data, orig, tmp = 0;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
orig = data = RREG32(CGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
WREG32(CGTS_SM_CTRL_REG, data);
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
- if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
@@ -5159,7 +5085,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev,
{
u32 orig, data, tmp;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
tmp |= 0x3fff;
WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
@@ -5207,7 +5133,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable)
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
data |= MC_LS_ENABLE;
else
data &= ~MC_LS_ENABLE;
@@ -5216,226 +5142,295 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
}
}
-
-static void si_init_cg(struct radeon_device *rdev)
+static void si_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- si_enable_mgcg(rdev, true);
- si_enable_cgcg(rdev, false);
- /* disable MC LS on Tahiti */
- if (rdev->family == CHIP_TAHITI)
- si_enable_mc_ls(rdev, false);
- if (rdev->has_uvd) {
- si_enable_uvd_mgcg(rdev, true);
- si_init_uvd_internal_cg(rdev);
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
}
}
-static void si_fini_cg(struct radeon_device *rdev)
+static void si_enable_dma_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- if (rdev->has_uvd)
- si_enable_uvd_mgcg(rdev, false);
- si_enable_cgcg(rdev, false);
- si_enable_mgcg(rdev, false);
-}
+ u32 orig, data, offset;
+ int i;
-static void si_init_pg(struct radeon_device *rdev)
-{
- bool has_pg = false;
-#if 0
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
-#endif
- if (has_pg) {
- si_init_ao_cu_mask(rdev);
- si_init_dma_pg(rdev);
- si_enable_dma_pg(rdev, true);
- si_init_gfx_cgpg(rdev);
- si_enable_gfx_cgpg(rdev, true);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data &= ~MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ }
} else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data |= MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+
+ orig = data = RREG32(DMA_CLK_CTRL + offset);
+ data = 0xff000000;
+ if (data != orig)
+ WREG32(DMA_CLK_CTRL + offset, data);
+ }
}
}
-static void si_fini_pg(struct radeon_device *rdev)
+static void si_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
{
- bool has_pg = false;
+ u32 orig, data;
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
+ orig = data = RREG32_PCIE(PCIE_CNTL2);
- if (has_pg) {
- si_enable_dma_pg(rdev, false);
- si_enable_gfx_cgpg(rdev, false);
- }
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE(PCIE_CNTL2, data);
}
-/*
- * RLC
- */
-void si_rlc_fini(struct radeon_device *rdev)
+static void si_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj) {
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ u32 orig, data;
- radeon_bo_unref(&rdev->rlc.save_restore_obj);
- rdev->rlc.save_restore_obj = NULL;
- }
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
- /* clear state block */
- if (rdev->rlc.clear_state_obj) {
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
- radeon_bo_unref(&rdev->rlc.clear_state_obj);
- rdev->rlc.clear_state_obj = NULL;
- }
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
}
-#define RLC_CLEAR_STATE_END_MARKER 0x00000001
-
-int si_rlc_init(struct radeon_device *rdev)
+static void si_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
{
- volatile u32 *dst_ptr;
- u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
- u64 reg_list_mc_addr;
- const struct cs_section_def *cs_data = si_cs_data;
- int r;
+ u32 orig, data;
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
- &rdev->rlc.save_restore_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+
+void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
+{
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ si_enable_mgcg(rdev, true);
+ si_enable_cgcg(rdev, true);
+ } else {
+ si_enable_cgcg(rdev, false);
+ si_enable_mgcg(rdev, false);
}
}
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+ if (block & RADEON_CG_BLOCK_MC) {
+ si_enable_mc_mgcg(rdev, enable);
+ si_enable_mc_ls(rdev, enable);
}
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ si_enable_dma_mgcg(rdev, enable);
}
- if (rdev->family == CHIP_VERDE) {
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
- dst_ptr[i] = verde_rlc_save_restore_register_list[i];
- }
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
+ if (block & RADEON_CG_BLOCK_BIF) {
+ si_enable_bif_mgls(rdev, enable);
}
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd) {
+ si_enable_uvd_mgcg(rdev, enable);
}
}
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
+ if (block & RADEON_CG_BLOCK_HDP) {
+ si_enable_hdp_mgcg(rdev, enable);
+ si_enable_hdp_ls(rdev, enable);
}
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+}
+
+static void si_init_cg(struct radeon_device *rdev)
+{
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ si_init_uvd_internal_cg(rdev);
}
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
+}
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+static void si_fini_cg(struct radeon_device *rdev)
+{
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
}
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+}
+
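/*
 * Editor's note: clock gating is now driven per-block through
 * si_update_cg() with an OR-able block mask instead of open-coded
 * enable/disable sequences, e.g.:
 *
 *	si_update_cg(rdev, RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC, true);
 *
 * Note the ordering constraint inside the GFX block: MGCG before CGCG
 * on enable, the reverse on disable, as the comment above points out.
 */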
+u32 si_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
}
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ /* pa_sc_raster_config */
+ count += 3;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
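/*
 * Editor's note: the clear-state buffer size computed above is
 *
 *	2 (begin preamble) + 3 (context control)
 *	+ sum over SECT_CONTEXT extents of (2 + reg_count)
 *	+ 3 (PA_SC_RASTER_CONFIG) + 2 (end preamble) + 2 (clear state)
 *
 * e.g. one extent of 4 registers gives 2+3+(2+4)+3+2+2 = 18 dwords.
 */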
+void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
}
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ buffer[count++] = 0x2a00126a;
+ break;
+ case CHIP_VERDE:
+ buffer[count++] = 0x0000124a;
+ break;
+ case CHIP_OLAND:
+ buffer[count++] = 0x00000082;
+ break;
+ case CHIP_HAINAN:
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ break;
+ }
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
- return 0;
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
}
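/*
 * Editor's note (usage sketch, hypothetical buffer handling): the two
 * CSB helpers pair up -- size first, then fill:
 *
 *	u32 dws = si_get_csb_size(rdev);
 *	... allocate dws * 4 bytes and map it as rdev->rlc.cs_ptr ...
 *	si_get_csb_buffer(rdev, rdev->rlc.cs_ptr);
 *
 * SET_CONTEXT_REG payloads carry offsets relative to the 0xa000
 * context-register base, hence the "- 0xa000" above.
 */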
-static void si_rlc_reset(struct radeon_device *rdev)
+static void si_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
+ si_init_dma_pg(rdev);
+ }
+ si_init_ao_cu_mask(rdev);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ si_init_gfx_cgpg(rdev);
+ }
+ si_enable_dma_pg(rdev, true);
+ si_enable_gfx_cgpg(rdev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ }
+}
+
+static void si_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ si_enable_dma_pg(rdev, false);
+ si_enable_gfx_cgpg(rdev, false);
+ }
+}
+
+/*
+ * RLC
+ */
+void si_rlc_reset(struct radeon_device *rdev)
{
u32 tmp = RREG32(GRBM_SOFT_RESET);
@@ -5651,7 +5646,7 @@ static int si_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -6335,80 +6330,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * si_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (SI).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0xFFFFF)
- cur_size_in_bytes = 0xFFFFF;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
- radeon_ring_write(ring, dst_offset & 0xffffffff);
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
/*
* startup/shutdown callbacks
*/
@@ -6422,6 +6343,11 @@ static int si_startup(struct radeon_device *rdev)
/* enable aspm */
si_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
si_mc_program(rdev);
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
@@ -6439,17 +6365,19 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
r = si_pcie_gart_enable(rdev);
if (r)
return r;
si_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->family == CHIP_VERDE) {
+ rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+ }
+ rdev->rlc.cs_data = si_cs_data;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6491,7 +6419,7 @@ static int si_startup(struct radeon_device *rdev)
}
if (rdev->has_uvd) {
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -6520,21 +6448,21 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_RB1_RPTR, CP_RB1_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_RB2_RPTR, CP_RB2_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6542,7 +6470,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6550,7 +6478,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6568,12 +6496,11 @@ static int si_startup(struct radeon_device *rdev)
if (rdev->has_uvd) {
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6591,6 +6518,10 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6622,13 +6553,16 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
}
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -6735,7 +6669,7 @@ int si_init(struct radeon_device *rdev)
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
@@ -6760,16 +6694,16 @@ void si_fini(struct radeon_device *rdev)
{
si_cp_fini(rdev);
cayman_dma_fini(rdev);
- si_irq_fini(rdev);
- si_rlc_fini(rdev);
- si_fini_cg(rdev);
si_fini_pg(rdev);
+ si_fini_cg(rdev);
+ si_irq_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->has_uvd) {
- r600_uvd_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
}
si_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 00000000000..49909d23dfc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA engine (SI).
+ */
+void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
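/*
 * Editor's note (rough sketch): in si_dma_vm_set_page() above,
 * scattered (system) pages cost a 3-dword WRITE header per run plus 2
 * dwords per PTE, while contiguous VRAM ranges use one 9-dword
 * PTE_PDE packet per run; runs are capped at 0xFFFFE payload dwords.
 * An IB-size estimate (ignores the trailing NOP padding):
 */
static unsigned si_vm_ib_dwords(unsigned count, int contiguous)
{
	unsigned runs = (2u * count + 0xFFFFD) / 0xFFFFE;

	return contiguous ? runs * 9 : runs * 3 + 2 * count;
}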
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
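/*
 * Editor's note: the DMA-ring VM flush above issues three SRBM_WRITE
 * packets: (1) point VM_CONTEXTn_PAGE_TABLE_BASE_ADDR at the new page
 * directory, (2) flush the HDP cache, then (3) invalidate the matching
 * VM context via VM_INVALIDATE_REQUEST (one request bit per context).
 */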
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 88699e3cd86..5be9b4e7235 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1753,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
+extern void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -3663,7 +3666,7 @@ static void si_clear_vc(struct radeon_device *rdev)
WREG32(CG_FTV, 0);
}
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
u8 mc_para_index;
@@ -3676,7 +3679,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
return mc_para_index;
}
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
u8 mc_para_index;
@@ -3758,20 +3761,21 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
return true;
}
-static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
- struct atom_voltage_table *voltage_table)
+void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
{
unsigned int i, diff;
- if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
+ if (voltage_table->count <= max_voltage_steps)
return;
- diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
+ diff = voltage_table->count - max_voltage_steps;
- for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
+ for (i= 0; i < max_voltage_steps; i++)
voltage_table->entries[i] = voltage_table->entries[i + diff];
- voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
+ voltage_table->count = max_voltage_steps;
}
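/*
 * Editor's note: the trim helper now takes the cap as a parameter
 * instead of hard-coding SISLANDS_MAX_NO_VREG_STEPS, so callers with
 * different SMC table limits can reuse it.  It drops the leading
 * `diff` entries (the lowest ones, assuming ascending order) by
 * shifting the tail over the head.
 */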
static int si_construct_voltage_tables(struct radeon_device *rdev)
@@ -3787,7 +3791,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3796,7 +3802,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddci_voltage_table);
}
if (pi->mvdd_control) {
@@ -3814,7 +3822,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
}
if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &si_pi->mvdd_voltage_table);
}
if (si_pi->vddc_phase_shed_control) {
@@ -5752,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (si_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control)
@@ -5871,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
ni_update_current_ps(rdev, boot_ps);
return 0;
@@ -5881,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (!si_is_smc_running(rdev))
return;
si_disable_ulv(rdev);
@@ -5945,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &eg_pi->current_rps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
ret = si_disable_ulv(rdev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
@@ -6043,6 +6081,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
return 0;
}
@@ -6232,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -6248,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev)
non_clock_info,
non_clock_info_array->ucEntrySize);
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
si_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
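The clockInfoIndex change above works around packed ATOM tables: clockInfo[] is declared with a nominal element type, but the real stride is the runtime ucEntrySize, so entries must be addressed by byte offset, and the index array is read through a plain u8 pointer rather than indexed directly. The pattern in isolation:

	/* packed ATOM power tables: byte-offset addressing, not C indexing */
	u8 *idx = (u8 *)&power_state->v2.clockInfoIndex[0];
	u8 clock_array_index = idx[j];
	union pplib_clock_info *clock_info =
		(union pplib_clock_info *)
		((u8 *)&clock_info_array->clockInfo[0] +
		 (clock_array_index * clock_info_array->ucEntrySize));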
@@ -6401,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev)
si_initialize_powertune_defaults(rdev);
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
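The si_dpm_init() guard added above is a plain fallback; in sketch form over the same fields (the struct name is taken from the radeon headers and assumed to apply here):

	/* reuse the AC limits when the BIOS leaves the DC ones empty */
	struct radeon_clock_and_voltage_limits *dc =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (dc->sclk == 0 || dc->mclk == 0)
		*dc = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;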
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c8da27a929..52d2ab6b67a 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -282,6 +282,10 @@
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
@@ -581,6 +585,7 @@
#define CLKS_MASK (0xfff << 0)
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -588,6 +593,8 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
#define ATC_MISC_CG 0x3350
@@ -635,6 +642,54 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
+# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
+# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
+# define AUDIO_ENABLED (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define PORT_CONNECTIVITY_MASK (3 << 30)
+#define PORT_CONNECTIVITY_SHIFT 30
+
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
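The DCE6 audio registers above are reached indirectly through the endpoint index/data pair. A hedged sketch of how a pin's LPCM descriptor might be programmed (the helper name is hypothetical; the access pattern is inferred from AZ_ENDPOINT_REG_WRITE_EN, not documented in this patch):

static void dce6_az_write_endpoint(struct radeon_device *rdev, u32 reg, u32 v)
{
	/* select the endpoint register, arm the write, then push data */
	WREG32(AZ_F0_CODEC_ENDPOINT_INDEX,
	       AZ_ENDPOINT_REG_INDEX(reg) | AZ_ENDPOINT_REG_WRITE_EN);
	WREG32(AZ_F0_CODEC_ENDPOINT_DATA, v);
}

	/* example: advertise 8-channel LPCM at every rate from 32 to 192 kHz */
	dce6_az_write_endpoint(rdev, AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
			       MAX_CHANNELS(7) |		/* N-1 encoding */
			       SUPPORTED_FREQUENCIES(0x7f) |
			       SUPPORTED_FREQUENCIES_STEREO(0x7f));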
@@ -755,6 +810,17 @@
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
+#define AFMT_AUDIO_SRC_CONTROL 0x713c
+#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1295,6 +1361,7 @@
/* PCIE registers idx/data 0x30/0x34 */
#define PCIE_CNTL2 0x1c /* PCIE */
# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
# define MST_MEM_LS_EN (1 << 18)
# define REPLAY_MEM_LS_EN (1 << 19)
#define PCIE_LC_STATUS1 0x28 /* PCIE */
@@ -1644,6 +1711,10 @@
# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_POWER_CNTL 0xd0bc
+# define MEM_POWER_OVERRIDE (1 << 8)
+#define DMA_CLK_CTRL 0xd0c0
+
#define DMA_PG 0xd0d4
# define PG_CNTL_ENABLE (1 << 0)
#define DMA_PGFSM_CONFIG 0xd0d8
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 00000000000..75a380a1529
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_H
+#define SMU7_H
+
+#pragma pack(push, 1)
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+
+#define SMU7_MAX_LEVELS_VDDC 8
+#define SMU7_MAX_LEVELS_VDDCI 4
+#define SMU7_MAX_LEVELS_MVDD 4
+#define SMU7_MAX_LEVELS_VDDNB 8
+
+#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
+#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
+#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
+#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
+#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
+#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
+#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
+#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
+#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+
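The SCRATCH_B fields above are 3-bit indices packed into one register; each is recovered with its mask/shift pair, for example:

/* sketch: decode the current UVD DPM index out of SCRATCH_B */
static inline u32 scratch_b_curr_uvd_index(u32 scratch_b)
{
	return (scratch_b & SCRATCH_B_CURR_UVD_INDEX_MASK) >>
	       SCRATCH_B_CURR_UVD_INDEX_SHIFT;
}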
+struct SMU7_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUL;
+ int32_t LFWindupLL;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU7_PIDController SMU7_PIDController;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+struct SMU7_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t Reserved[42];
+ uint32_t Signature;
+};
+
+typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+#pragma pack(pop)
+
+#endif
+
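Everything in smu7.h sits inside #pragma pack(push, 1), so the structures mirror the SMC firmware layout byte-for-byte and individual table pointers can be located by field offset from the fixed header address. A sketch, assuming an SMC SRAM read helper of roughly this shape exists in the driver:

	/* locate the DPM table via the packed firmware header */
	u32 dpm_table_addr;

	smu7_read_smc_sram_dword(rdev,
				 SMU7_FIRMWARE_HEADER_LOCATION +
				 offsetof(SMU7_Firmware_Header, DpmTable),
				 &dpm_table_addr);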
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 00000000000..82f70c90a9e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_DISCRETE_H
+#define SMU7_DISCRETE_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 3
+#define SMU7_DTE_SINKS 1
+#define SMU7_NUM_CPU_TES 0
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
+
+struct SMU7_Discrete_GraphicsLevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t padding1[2];
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t PowerThrottle;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
+
+struct SMU7_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
+
+struct SMU7_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
+
+struct SMU7_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
+
+struct SMU7_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t Padding;
+ uint32_t DownT;
+ uint32_t UpT;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
+
+
+struct SMU7_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU7_Discrete_MCArbDramTimingTable
+{
+ SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
+
+struct SMU7_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
+
+struct SMU7_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
+
+struct SMU7_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
+
+
+struct SMU7_Discrete_DpmTable
+{
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController MemoryPIDController;
+ SMU7_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
+// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
+ SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
+ SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint8_t MasterDeepSleepControl;
+ uint32_t Reserved[5];
+// uint32_t SamuDefaultLevel;
+
+ SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
+ SMU7_Discrete_MemoryLevel MemoryACPILevel;
+ SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
+ SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
+ SMU7_Discrete_ACPILevel ACPILevel;
+ SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Discrete_Ulv Ulv;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint16_t VddcVddciDelta;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighT;
+ uint16_t FpsLowT;
+
+ uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptT;
+};
+
+typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
+
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
+
+struct SMU7_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
+
+struct SMU7_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
+
+struct SMU7_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+
+struct SMU7_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw10
+ uint8_t BapmVddCVidHiSidd2[8];
+
+ // dw11-dw12
+ uint32_t Reserved6[2];
+
+ // dw13-dw16
+ uint8_t GnbLPML[16];
+
+ // dw17
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw18
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
+
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 00000000000..78ada9ffd50
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_FUSION_H
+#define SMU7_FUSION_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+// All 'soft registers' should be uint32_t.
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Fusion_GraphicsLevel
+{
+ uint32_t MinVddNb;
+
+ uint32_t SclkFrequency;
+
+ uint8_t Vid;
+ uint8_t VidOffset;
+ uint16_t AT;
+
+ uint8_t PowerThrottle;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t SclkDid;
+
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t DeepSleepDivId;
+
+ uint8_t ClkBypassCntl;
+
+ uint32_t reserved;
+};
+
+typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
+
+struct SMU7_Fusion_GIOLevel
+{
+ uint8_t EnabledForActivity;
+ uint8_t LclkDid;
+ uint8_t Vid;
+ uint8_t VoltageDownH;
+
+ uint32_t MinVddNb;
+
+ uint16_t ResidencyCounter;
+ uint8_t UpH;
+ uint8_t DownH;
+
+ uint32_t LclkFrequency;
+
+ uint8_t ActivityLevel;
+ uint8_t EnabledForThrottle;
+
+ uint8_t ClkBypassCntl;
+
+ uint8_t padding;
+};
+
+typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU7_Fusion_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddNb;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+
+ uint8_t VClkBypassCntl;
+ uint8_t DClkBypassCntl;
+
+ uint8_t padding[2];
+
+};
+
+typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU7_Fusion_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t Divider;
+ uint8_t ClkBypassCntl;
+
+ uint32_t Reserved;
+};
+typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
+
+struct SMU7_Fusion_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddNb;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
+
+struct SMU7_Fusion_NbDpm
+{
+ uint8_t DpmXNbPsHi;
+ uint8_t DpmXNbPsLo;
+ uint8_t Dpm0PgNbPsHi;
+ uint8_t Dpm0PgNbPsLo;
+ uint8_t EnablePsi1;
+ uint8_t SkipDPM0;
+ uint8_t SkipPG;
+ uint8_t Hysteresis;
+ uint8_t EnableDpmPstatePoll;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
+
+struct SMU7_Fusion_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+};
+
+typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
+
+struct SMU7_Fusion_DpmTable
+{
+ uint32_t SystemFlags;
+
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController GioPIDController;
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t GIOLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint16_t FpsHighT;
+
+ SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel ACPILevel;
+ SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsInterval;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsVoltageChangeEnable;
+
+ uint8_t GraphicsClkSlowEnable;
+ uint8_t GraphicsClkSlowDivider;
+ uint16_t FpsLowT;
+
+ uint32_t DisplayCac;
+ uint32_t LowSclkInterruptT;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+
+};
+
+struct SMU7_Fusion_GIODpmTable
+{
+
+ SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+
+ SMU7_PIDController GioPIDController;
+
+ uint32_t GIOLevelCount;
+
+ uint8_t Enable;
+ uint8_t GIOVoltageChangeEnable;
+ uint8_t GIOBootLevel;
+ uint8_t padding;
+ uint8_t padding1[2];
+ uint8_t TargetState;
+ uint8_t CurrenttState;
+ uint8_t ThrottleOnHtc;
+ uint8_t ThermThrottleStatus;
+ uint8_t ThermThrottleTempSelect;
+ uint8_t ThermThrottleEnable;
+ uint16_t TemperatureLimitHigh;
+ uint16_t TemperatureLimitLow;
+
+};
+
+typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
+typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a85031990..864761c0120 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
+
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
sumo_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
@@ -1530,6 +1534,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ u32 i;
+
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+}
+
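The new sumo_convert_vid7_to_vid2() is the inverse of the existing 2-to-7 mapping, with the same out-of-range fallback to the table's last entry. Typical round-trip usage (the vid_map pointer is assumed to come from the driver's sumo power info):

	u32 vid7 = sumo_convert_vid2_to_vid7(rdev, vid_map, vid_2bit);
	u32 vid2 = sumo_convert_vid7_to_vid2(rdev, vid_map, vid7);
	/* vid2 == vid_2bit whenever the mapping table is one-to-one */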
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
{
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c78..db1ea32a907 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_2bit);
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit);
u32 sumo_get_sleep_divider_from_id(u32 id);
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index a1eb5f59939..b07b7b8f1af 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
trinity_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 00000000000..7266805d978
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+/**
+ * uvd_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+}
+
+/**
+ * uvd_v1_0_init - start and test UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+int uvd_v1_0_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t tmp;
+ int r;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ r = uvd_v1_0_start(rdev);
+ if (r)
+ goto done;
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ goto done;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+done:
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * uvd_v1_0_fini - stop the hardware block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the UVD block, mark ring as not ready any more
+ */
+void uvd_v1_0_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ uvd_v1_0_stop(rdev);
+ ring->ready = false;
+}
+
+/**
+ * uvd_v1_0_start - start UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the UVD block
+ */
+int uvd_v1_0_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t rb_bufsz;
+ int i, j, r;
+
+ /* disable byte swapping */
+ u32 lmi_swap_cntl = 0;
+ u32 mp_swap_cntl = 0;
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+ mp_swap_cntl = 0;
+#endif
+ WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+ WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ return r;
+ }
+
+ /* enable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = order_base_2(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+ return 0;
+}
+
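The ring-buffer size field written at the end of uvd_v1_0_start() is a log2 value; the meanings of the other UVD_RBC_RB_CNTL bits are not spelled out in this patch, so this sketch only annotates what the code itself shows:

	/* ring_size must be a power of two; order_base_2() yields its log2 */
	u32 rb_bufsz = order_base_2(ring->ring_size);	/* e.g. 4096 -> 12 */
	WREG32_P(UVD_RBC_RB_CNTL, (0x1 << 8) | rb_bufsz, ~0x11f1f);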
+/**
+ * uvd_v1_0_stop - stop UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * stop the UVD block
+ */
+void uvd_v1_0_stop(struct radeon_device *rdev)
+{
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+}
+
+/**
+ * uvd_v1_0_ring_test - register write test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * uvd_v1_0_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
+/**
+ * uvd_v1_0_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v1_0_ib_test - test ib execution
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 00000000000..b19ef495108
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * uvd_v2_2_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
+/**
+ * uvd_v2_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v2_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
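uvd_v2_2_resume() splits a 40-bit GPU address across three registers. In isolation, with the bit ranges taken from the comments above (a sketch, not a drop-in):

	u64 base = rdev->uvd.gpu_addr;
	u32 ext  = (base >> 28) & 0xF;			/* bits 28-31 */
	WREG32(UVD_VCPU_CACHE_OFFSET0, base >> 3);	/* bits 0-27, 8-byte units */
	WREG32(UVD_LMI_ADDR_EXT, (ext << 12) | (ext << 0));	/* mirrored fields */
	WREG32(UVD_LMI_EXT40_ADDR, ((base >> 32) & 0xFF) |	/* bits 32-39 */
	       (0x9 << 16) | (0x1 << 31));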
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 00000000000..5b6fa1f62d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+/**
+ * uvd_v3_1_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
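uvd_v3_1_semaphore_emit() matches uvd_v1_0_semaphore_emit() except for the command word, which additionally ORs in 0x80; the patch does not document what that flag means to the hardware, so only the delta is shown:

	/* v1.0 */ radeon_ring_write(ring, emit_wait ? 1 : 0);
	/* v3.1 */ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));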
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 00000000000..d04d5073eef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/**
+ * uvd_v4_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v4_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t size;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 72887df8dd7..c590cd9dca0 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+
+config DRM_RCAR_LVDS
+ bool "R-Car DU LVDS Encoder Support"
+ depends on DRM_RCAR_DU
+ help
+ Enable support for the R-Car Display Unit embedded LVDS encoders
+ (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 7333c009401..12b8d447783 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,8 +1,12 @@
rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_drv.o \
+ rcar_du_encoder.o \
+ rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_lvds.o \
+ rcar_du_lvdscon.o \
rcar_du_plane.o \
- rcar_du_vga.o
+ rcar_du_vgacon.o
-obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
+
+obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 24183fb9359..a9d24e4bf79 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -23,30 +23,26 @@
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
-
-#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
u32 clr, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
}
+static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rcrtc->clock);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_du_group_get(rcrtc->group);
+ if (ret < 0)
+ clk_disable_unprepare(rcrtc->clock);
+
+ return ret;
+}
+
+static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
+{
+ rcar_du_group_put(rcrtc->group);
+ clk_disable_unprepare(rcrtc->clock);
+}
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
- struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
- const struct drm_display_mode *mode = &crtc->mode;
+ const struct drm_display_mode *mode = &rcrtc->crtc.mode;
unsigned long clk;
u32 value;
u32 div;
/* Dot clock */
- clk = clk_get_rate(rcdu->clock);
+ clk = clk_get_rate(rcrtc->clock);
div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
div = clamp(div, 1U, 64U) - 1;
- rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR,
- ESCR_DCLKSEL_CLKS | div);
- rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
+ ESCR_DCLKSEL_CLKS | div);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
-static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc)
-{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
- u32 dorcr = rcar_du_read(rcdu, DORCR);
-
- dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
-
- /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
- * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
- * default.
- */
- if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
- dorcr |= DORCR_PG2D_DS1;
- else
- dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
-
- rcar_du_write(rcdu, DORCR, dorcr);
-}
-
-static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
- rcar_du_write(rcdu, DSYSR,
- (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
- (start ? DSYSR_DEN : DSYSR_DRES));
-}
-
-static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
- /* Many of the configuration bits are only updated when the display
- * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
- * of those bits could be pre-configured, but others (especially the
- * bits related to plane assignment to display timing controllers) need
- * to be modified at runtime.
- *
- * Restart the display controller if a start is requested. Sorry for the
- * flicker. It should be possible to move most of the "DRES-update" bits
- * setup to driver initialization time and minimize the number of cases
- * when the display controller will have to be restarted.
- */
- if (start) {
- if (rcdu->used_crtcs++ != 0)
- __rcar_du_start_stop(rcdu, false);
- __rcar_du_start_stop(rcdu, true);
- } else {
- if (--rcdu->used_crtcs == 0)
- __rcar_du_start_stop(rcdu, false);
- }
-}
-
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+ enum rcar_du_output output)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
/* Store the route from the CRTC output to the DU output. The DU will be
* configured when starting the CRTC.
*/
- rcrtc->outputs |= 1 << output;
+ rcrtc->outputs |= BIT(output);
+
+ /* Store RGB routing to DPAD0 for R8A7790. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
+ output == RCAR_DU_OUTPUT_DPAD0)
+ rcdu->dpad0_source = rcrtc->index;
}
void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
u32 dptsr = 0;
u32 dspr = 0;
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
unsigned int j;
if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
/* Select display timing and dot clock generator 2 for planes associated
* with superposition controller 2.
*/
- if (rcrtc->index) {
- u32 value = rcar_du_read(rcdu, DPTSR);
+ if (rcrtc->index % 2) {
+ u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
/* The DPTSR register is updated when the display controller is
* stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* occur only if we need to break the pre-association.
*/
if (value != dptsr) {
- rcar_du_write(rcdu, DPTSR, dptsr);
- if (rcdu->used_crtcs) {
- __rcar_du_start_stop(rcdu, false);
- __rcar_du_start_stop(rcdu, true);
- }
+ rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
+ if (rcrtc->group->used_crtcs)
+ rcar_du_group_restart(rcrtc->group);
}
}
- rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
+ dspr);
}
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
unsigned int i;
if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
/* Configure display timings and output routing */
rcar_du_crtc_set_display_timing(rcrtc);
- rcar_du_crtc_set_routing(rcrtc);
+ rcar_du_group_set_routing(rcrtc->group);
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rcrtc->group->planes.lock);
rcrtc->plane->enabled = true;
rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rcrtc->group->planes.lock);
/* Setup planes. */
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
if (plane->crtc != crtc || !plane->enabled)
continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
- rcar_du_start_stop(rcdu, true);
+ rcar_du_group_start_stop(rcrtc->group, true);
rcrtc->started = true;
}
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
if (!rcrtc->started)
return;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rcrtc->group->planes.lock);
rcrtc->plane->enabled = false;
rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rcrtc->group->planes.lock);
/* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
- rcar_du_start_stop(rcdu, false);
+ rcar_du_group_start_stop(rcrtc->group, false);
rcrtc->started = false;
}
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
rcar_du_crtc_stop(rcrtc);
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
}
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
if (rcrtc->dpms != DRM_MODE_DPMS_ON)
return;
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
}
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
if (rcrtc->dpms == mode)
return;
if (mode == DRM_MODE_DPMS_ON) {
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
} else {
rcar_du_crtc_stop(rcrtc);
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
}
rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
/* We need to access the hardware during mode set, acquire a reference
- * to the DU.
+ * to the CRTC.
*/
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
/* Stop the CRTC and release the plane. Force the DPMS mode to off as a
* result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
const struct rcar_du_format_info *format;
int ret;
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
error:
/* There's no rollback/abort operation to clean up in case of error. We
- * thus need to release the reference to the DU acquired in prepare()
+ * thus need to release the reference to the CRTC acquired in prepare()
* here.
*/
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
return ret;
}
@@ -514,9 +477,28 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
drm_vblank_put(dev, rcrtc->index);
}
+static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
+{
+ struct rcar_du_crtc *rcrtc = arg;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ status = rcar_du_crtc_read(rcrtc, DSSR);
+ rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
+
+ if (status & DSSR_VBK) {
+ drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ rcar_du_crtc_finish_page_flip(rcrtc);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
@@ -549,16 +531,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
.page_flip = rcar_du_crtc_page_flip,
};
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
+ static const unsigned int mmio_offsets[] = {
+ DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
+ };
+
+ struct rcar_du_device *rcdu = rgrp->dev;
+ struct platform_device *pdev = to_platform_device(rcdu->dev);
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
struct drm_crtc *crtc = &rcrtc->crtc;
+ unsigned int irqflags;
+ char clk_name[5];
+ char *name;
+ int irq;
int ret;
- rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0;
+ /* Get the CRTC clock. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ sprintf(clk_name, "du.%u", index);
+ name = clk_name;
+ } else {
+ name = NULL;
+ }
+
+ rcrtc->clock = devm_clk_get(rcdu->dev, name);
+ if (IS_ERR(rcrtc->clock)) {
+ dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
+ return PTR_ERR(rcrtc->clock);
+ }
+
+ rcrtc->group = rgrp;
+ rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
rcrtc->dpms = DRM_MODE_DPMS_OFF;
- rcrtc->plane = &rcdu->planes.planes[index];
+ rcrtc->plane = &rgrp->planes.planes[index % 2];
rcrtc->plane->crtc = crtc;
@@ -568,6 +575,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+ /* Register the interrupt handler. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ irq = platform_get_irq(pdev, index);
+ irqflags = 0;
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ irqflags = IRQF_SHARED;
+ }
+
+ if (irq < 0) {
+ dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
+ return irq;
+ }
+
+ ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
+ dev_name(rcdu->dev), rcrtc);
+ if (ret < 0) {
+ dev_err(rcdu->dev,
+ "failed to register IRQ for CRTC %u\n", index);
+ return ret;
+ }
+
return 0;
}
@@ -580,16 +609,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
}
}
-
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
-{
- u32 status;
-
- status = rcar_du_crtc_read(rcrtc, DSSR);
- rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
-
- if (status & DSSR_VBK) {
- drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
- rcar_du_crtc_finish_page_flip(rcrtc);
- }
-}
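
rcar_du_crtc_set_display_timing() above derives the ESCR divider field by rounding the module clock to the closest integer divider of the dot clock and clamping the result to the 1-64 hardware range. A standalone sketch of that computation, with a made-up clock rate:

#include <stdio.h>

static unsigned int du_escr_div(unsigned long clk_hz, unsigned int mode_khz)
{
	unsigned long target = (unsigned long)mode_khz * 1000;
	/* DIV_ROUND_CLOSEST(), then clamp(div, 1, 64) - 1 as in the driver */
	unsigned long div = (clk_hz + target / 2) / target;

	if (div < 1)
		div = 1;
	else if (div > 64)
		div = 64;
	return (unsigned int)(div - 1);	/* ESCR stores the divider minus one */
}

int main(void)
{
	/* Hypothetical 148.5 MHz module clock driving a 74.25 MHz mode. */
	printf("ESCR div field = %u\n", du_escr_div(148500000ul, 74250));
	return 0;
}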
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2a0365bcbd1..43e7575c700 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,16 +15,18 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/platform_data/rcar-du.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
-struct rcar_du_device;
+struct rcar_du_group;
struct rcar_du_plane;
struct rcar_du_crtc {
struct drm_crtc crtc;
+ struct clk *clock;
unsigned int mmio_offset;
unsigned int index;
bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
unsigned int outputs;
int dpms;
+ struct rcar_du_group *group;
struct rcar_du_plane *plane;
};
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index);
+#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
struct drm_file *file);
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output);
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+ enum rcar_du_output output);
void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index dc0fe09b2ba..0023f9719cf 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_crtc.h"
@@ -29,74 +30,21 @@
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
- * Core device operations
- */
-
-/*
- * rcar_du_get - Acquire a reference to the DU
- *
- * Acquiring a reference enables the device clock and setup core registers. A
- * reference must be held before accessing any hardware registers.
- *
- * This function must be called with the DRM mode_config lock held.
- *
- * Return 0 in case of success or a negative error code otherwise.
- */
-int rcar_du_get(struct rcar_du_device *rcdu)
-{
- int ret;
-
- if (rcdu->use_count)
- goto done;
-
- /* Enable clocks before accessing the hardware. */
- ret = clk_prepare_enable(rcdu->clock);
- if (ret < 0)
- return ret;
-
- /* Enable extended features */
- rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
- rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
- rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
- rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
- rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
-
- /* Use DS1PR and DS2PR to configure planes priorities and connects the
- * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
- */
- rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
-
-done:
- rcdu->use_count++;
- return 0;
-}
-
-/*
- * rcar_du_put - Release a reference to the DU
- *
- * Releasing the last reference disables the device clock.
- *
- * This function must be called with the DRM mode_config lock held.
- */
-void rcar_du_put(struct rcar_du_device *rcdu)
-{
- if (--rcdu->use_count)
- return;
-
- clk_disable_unprepare(rcdu->clock);
-}
-
-/* -----------------------------------------------------------------------------
* DRM operations
*/
static int rcar_du_unload(struct drm_device *dev)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
+
+ if (rcdu->fbdev)
+ drm_fbdev_cma_fini(rcdu->fbdev);
+
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
- drm_irq_uninstall(dev);
+ dev->irq_enabled = 0;
dev->dev_private = NULL;
return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
struct platform_device *pdev = dev->platformdev;
struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
struct rcar_du_device *rcdu;
- struct resource *ioarea;
struct resource *mem;
int ret;
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
rcdu->dev = &pdev->dev;
rcdu->pdata = pdata;
+ rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
rcdu->ddev = dev;
dev->dev_private = rcdu;
- /* I/O resources and clocks */
+ /* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -EINVAL;
- }
-
- ioarea = devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), pdev->name);
- if (ioarea == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- return -EBUSY;
- }
-
- rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
- resource_size(ioarea));
- if (rcdu->mmio == NULL) {
- dev_err(&pdev->dev, "failed to remap memory resource\n");
- return -ENOMEM;
- }
-
- rcdu->clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(rcdu->clock)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return -ENOENT;
- }
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
goto done;
}
- /* IRQ and vblank handling */
+ /* vblank handling */
ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize vblank\n");
goto done;
}
- ret = drm_irq_install(dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to install IRQ handler\n");
- goto done;
- }
+ dev->irq_enabled = 1;
platform_set_drvdata(pdev, rcdu);
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
struct rcar_du_device *rcdu = dev->dev_private;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+ for (i = 0; i < rcdu->num_crtcs; ++i)
rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
}
-static irqreturn_t rcar_du_irq(int irq, void *arg)
+static void rcar_du_lastclose(struct drm_device *dev)
{
- struct drm_device *dev = arg;
struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
- rcar_du_crtc_irq(&rcdu->crtcs[i]);
- return IRQ_HANDLED;
+ drm_fbdev_cma_restore_mode(rcdu->fbdev);
}
static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
.load = rcar_du_load,
.unload = rcar_du_unload,
.preclose = rcar_du_preclose,
- .irq_handler = rcar_du_irq,
+ .lastclose = rcar_du_lastclose,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = rcar_du_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
return 0;
}
+static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+ .features = 0,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7779 has two RGB outputs and one (currently unsupported)
+ * TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1) | BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ },
+ .num_lvds = 0,
+};
+
+static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
+ | RCAR_DU_FEATURE_DEFR8,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7790 has one RGB output, two LVDS outputs and one
+ * (currently unsupported) TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ [RCAR_DU_OUTPUT_LVDS1] = {
+ .possible_crtcs = BIT(2) | BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ },
+ .num_lvds = 2,
+};
+
+static const struct platform_device_id rcar_du_id_table[] = {
+ { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
+ { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
+
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
.remove = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
.name = "rcar-du",
.pm = &rcar_du_pm_ops,
},
+ .id_table = rcar_du_id_table,
};
module_platform_driver(rcar_du_platform_driver);
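
The probe path now recovers the model-specific rcar_du_device_info through pdev->id_entry->driver_data, keyed by the platform device name in rcar_du_id_table. A user-space approximation of that id-table lookup follows; the structure fields and names are illustrative only.

#include <stdio.h>
#include <string.h>

/* Made-up subset of the per-model information. */
struct du_info {
	unsigned int num_crtcs;
	unsigned int num_lvds;
};

static const struct du_info r8a7779_info = { .num_crtcs = 2, .num_lvds = 0 };
static const struct du_info r8a7790_info = { .num_crtcs = 3, .num_lvds = 2 };

struct id_entry {
	const char *name;
	const struct du_info *data;	/* plays the role of driver_data */
};

static const struct id_entry id_table[] = {
	{ "rcar-du-r8a7779", &r8a7779_info },
	{ "rcar-du-r8a7790", &r8a7790_info },
	{ NULL, NULL }
};

static const struct du_info *du_lookup(const char *name)
{
	const struct id_entry *e;

	for (e = id_table; e->name; ++e)
		if (!strcmp(e->name, name))
			return e->data;
	return NULL;
}

int main(void)
{
	const struct du_info *info = du_lookup("rcar-du-r8a7790");

	if (info)
		printf("crtcs=%u lvds=%u\n", info->num_crtcs, info->num_lvds);
	return 0;
}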
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 193cc59d495..65d2d636b00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,43 +15,74 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
-#include <linux/mutex.h>
#include <linux/platform_data/rcar-du.h>
#include "rcar_du_crtc.h"
-#include "rcar_du_plane.h"
+#include "rcar_du_group.h"
struct clk;
struct device;
struct drm_device;
+struct drm_fbdev_cma;
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
+#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
+#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
+
+/*
+ * struct rcar_du_output_routing - Output routing specification
+ * @possible_crtcs: bitmask of possible CRTCs for the output
+ * @encoder_type: DRM type of the internal encoder associated with the output
+ *
+ * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
+ * specify the valid SoC outputs, which CRTCs can drive the output, and the type
+ * of in-SoC encoder for the output.
+ */
+struct rcar_du_output_routing {
+ unsigned int possible_crtcs;
+ unsigned int encoder_type;
+};
+
+/*
+ * struct rcar_du_device_info - DU model-specific information
+ * @features: device features (RCAR_DU_FEATURE_*)
+ * @num_crtcs: total number of CRTCs
+ * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
+ * @num_lvds: number of internal LVDS encoders
+ */
+struct rcar_du_device_info {
+ unsigned int features;
+ unsigned int num_crtcs;
+ struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
+ unsigned int num_lvds;
+};
struct rcar_du_device {
struct device *dev;
const struct rcar_du_platform_data *pdata;
+ const struct rcar_du_device_info *info;
void __iomem *mmio;
- struct clk *clock;
- unsigned int use_count;
struct drm_device *ddev;
+ struct drm_fbdev_cma *fbdev;
- struct rcar_du_crtc crtcs[2];
- unsigned int used_crtcs;
+ struct rcar_du_crtc crtcs[3];
unsigned int num_crtcs;
- struct {
- struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
- unsigned int free;
- struct mutex lock;
+ struct rcar_du_group groups[2];
- struct drm_property *alpha;
- struct drm_property *colorkey;
- struct drm_property *zpos;
- } planes;
+ unsigned int dpad0_source;
+ struct rcar_du_lvdsenc *lvds[2];
};
-int rcar_du_get(struct rcar_du_device *rcdu);
-void rcar_du_put(struct rcar_du_device *rcdu);
+static inline bool rcar_du_has(struct rcar_du_device *rcdu,
+ unsigned int feature)
+{
+ return rcdu->info->features & feature;
+}
static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644
index 00000000000..3daa7a168dc
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -0,0 +1,202 @@
+/*
+ * rcar_du_encoder.c -- R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvdscon.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_du_vgacon.h"
+
+/* -----------------------------------------------------------------------------
+ * Common connector functions
+ */
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector)
+{
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+ return &rcon->encoder->encoder;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+}
+
+static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ const struct drm_display_mode *panel_mode;
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ bool found = false;
+
+ /* DAC encoders currently have no restriction on the mode. */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+ return true;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_dbg(dev->dev, "mode_fixup: no connector found\n");
+ return false;
+ }
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+ return false;
+ }
+
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+
+ /* We're not allowed to modify the resolution. */
+ if (mode->hdisplay != panel_mode->hdisplay ||
+ mode->vdisplay != panel_mode->vdisplay)
+ return false;
+
+ /* The flat panel mode is fixed, just copy it to the adjusted mode. */
+ drm_mode_copy(adjusted_mode, panel_mode);
+
+ /* The internal LVDS encoder has a clock frequency operating range of
+ * 30MHz to 150MHz. Clamp the clock accordingly.
+ */
+ if (renc->lvds)
+ adjusted_mode->clock = clamp(adjusted_mode->clock,
+ 30000, 150000);
+
+ return true;
+}
+
+static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+ DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+ DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ rcar_du_crtc_route_output(encoder->crtc, renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = rcar_du_encoder_dpms,
+ .mode_fixup = rcar_du_encoder_mode_fixup,
+ .prepare = rcar_du_encoder_mode_prepare,
+ .commit = rcar_du_encoder_mode_commit,
+ .mode_set = rcar_du_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ enum rcar_du_encoder_type type,
+ enum rcar_du_output output,
+ const struct rcar_du_encoder_data *data)
+{
+ struct rcar_du_encoder *renc;
+ unsigned int encoder_type;
+ int ret;
+
+ renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
+ if (renc == NULL)
+ return -ENOMEM;
+
+ renc->output = output;
+
+ switch (output) {
+ case RCAR_DU_OUTPUT_LVDS0:
+ renc->lvds = rcdu->lvds[0];
+ break;
+
+ case RCAR_DU_OUTPUT_LVDS1:
+ renc->lvds = rcdu->lvds[1];
+ break;
+
+ default:
+ break;
+ }
+
+ switch (type) {
+ case RCAR_DU_ENCODER_VGA:
+ encoder_type = DRM_MODE_ENCODER_DAC;
+ break;
+ case RCAR_DU_ENCODER_LVDS:
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ case RCAR_DU_ENCODER_NONE:
+ default:
+ /* No external encoder, use the internal encoder type. */
+ encoder_type = rcdu->info->routes[output].encoder_type;
+ break;
+ }
+
+ ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
+ encoder_type);
+ if (ret < 0)
+ return ret;
+
+ drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+
+ switch (encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ return rcar_du_lvds_connector_init(rcdu, renc,
+ &data->connector.lvds.panel);
+
+ case DRM_MODE_ENCODER_DAC:
+ return rcar_du_vga_connector_init(rcdu, renc);
+
+ default:
+ return -EINVAL;
+ }
+}
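
rcar_du_encoder_mode_fixup() above implements a fixed-panel policy: reject any resolution that differs from the first panel mode, copy the panel mode into the adjusted mode, and clamp the clock to the internal LVDS PLL's 30-150 MHz range. A minimal user-space sketch of that policy, with illustrative types and values:

#include <stdbool.h>
#include <stdio.h>

struct mode { int hdisplay, vdisplay, clock; /* clock in kHz */ };

static bool fixup(const struct mode *req, const struct mode *panel,
		  struct mode *adj, bool lvds)
{
	if (req->hdisplay != panel->hdisplay ||
	    req->vdisplay != panel->vdisplay)
		return false;		/* the panel resolution is fixed */

	*adj = *panel;
	if (lvds) {			/* internal LVDS PLL: 30-150 MHz */
		if (adj->clock < 30000)
			adj->clock = 30000;
		else if (adj->clock > 150000)
			adj->clock = 150000;
	}
	return true;
}

int main(void)
{
	struct mode panel = { 1280, 800, 71000 }, adj;
	struct mode req = panel;

	printf("ok=%d clock=%d\n", fixup(&req, &panel, &adj, true), adj.clock);
	return 0;
}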
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644
index 00000000000..0e5a65e45d0
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -0,0 +1,49 @@
+/*
+ * rcar_du_encoder.h -- R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_ENCODER_H__
+#define __RCAR_DU_ENCODER_H__
+
+#include <linux/platform_data/rcar-du.h>
+
+#include <drm/drm_crtc.h>
+
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+struct rcar_du_encoder {
+ struct drm_encoder encoder;
+ enum rcar_du_output output;
+ struct rcar_du_lvdsenc *lvds;
+};
+
+#define to_rcar_encoder(e) \
+ container_of(e, struct rcar_du_encoder, encoder)
+
+struct rcar_du_connector {
+ struct drm_connector connector;
+ struct rcar_du_encoder *encoder;
+};
+
+#define to_rcar_connector(c) \
+ container_of(c, struct rcar_du_connector, connector)
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector);
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ enum rcar_du_encoder_type type,
+ enum rcar_du_output output,
+ const struct rcar_du_encoder_data *data);
+
+#endif /* __RCAR_DU_ENCODER_H__ */
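
The to_rcar_encoder() and to_rcar_connector() macros above rely on container_of() to step from an embedded DRM object back to its wrapper. A self-contained demonstration of the underlying offsetof arithmetic, using hypothetical structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical wrapper mirroring struct rcar_du_encoder's layout. */
struct base_encoder { int id; };
struct wrap_encoder {
	int output;
	struct base_encoder encoder;	/* embedded base object */
};

int main(void)
{
	struct wrap_encoder enc = { .output = 2, .encoder = { .id = 7 } };
	struct base_encoder *base = &enc.encoder;

	/* Step back from the embedded member to the wrapper. */
	struct wrap_encoder *up =
		container_of(base, struct wrap_encoder, encoder);

	printf("output=%d id=%d\n", up->output, up->encoder.id);
	return 0;
}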
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644
index 00000000000..eb53cd97e8c
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -0,0 +1,187 @@
+/*
+ * rcar_du_group.c -- R-Car Display Unit Channels Pair
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
+ * unit, timings generator, ...) and device-global resources (start/stop
+ * control, planes, ...) shared between the two CRTCs.
+ *
+ * The R8A7790 introduced a third CRTC with its own set of global resources.
+ * This would be modeled as two separate DU device instances if it wasn't for
+ * a handful of resources that are shared between the three CRTCs (mostly
+ * related to input and output routing). For this reason the R8A7790 DU must be
+ * modeled as a single device with three CRTCs, two sets of "semi-global"
+ * resources, and a few device-global resources.
+ *
+ * The rcar_du_group object is a driver-specific object, without any real
+ * counterpart in the DU documentation, that models those semi-global resources.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_group.h"
+#include "rcar_du_regs.h"
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
+{
+ return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
+}
+
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
+{
+ rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
+}
+
+static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
+{
+ u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
+
+ if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
+ return;
+
+ /* The DEFR8 register for the first group also controls RGB output
+ * routing to DPAD0
+ */
+ if (rgrp->index == 0)
+ defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
+
+ rcar_du_group_write(rgrp, DEFR8, defr8);
+}
+
+static void rcar_du_group_setup(struct rcar_du_group *rgrp)
+{
+ /* Enable extended features */
+ rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
+ rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+ rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+ rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+ rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
+
+ rcar_du_group_setup_defr8(rgrp);
+
+ /* Use DS1PR and DS2PR to configure plane priorities and connect
+ * superposition 0 to the DU0 pins. DU1 pins will be configured dynamically.
+ */
+ rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+}
+
+/*
+ * rcar_du_group_get - Acquire a reference to the DU channels group
+ *
+ * Acquiring the first reference sets up the core registers. A reference must be held
+ * before accessing any hardware registers.
+ *
+ * This function must be called with the DRM mode_config lock held.
+ *
+ * Return 0 in case of success or a negative error code otherwise.
+ */
+int rcar_du_group_get(struct rcar_du_group *rgrp)
+{
+ if (rgrp->use_count)
+ goto done;
+
+ rcar_du_group_setup(rgrp);
+
+done:
+ rgrp->use_count++;
+ return 0;
+}
+
+/*
+ * rcar_du_group_put - Release a reference to the DU channels group
+ *
+ * This function must be called with the DRM mode_config lock held.
+ */
+void rcar_du_group_put(struct rcar_du_group *rgrp)
+{
+ --rgrp->use_count;
+}
+
+static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+ rcar_du_group_write(rgrp, DSYSR,
+ (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
+ (start ? DSYSR_DEN : DSYSR_DRES));
+}
+
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+ /* Many of the configuration bits are only updated when the display
+ * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
+ * of those bits could be pre-configured, but others (especially the
+ * bits related to plane assignment to display timing controllers) need
+ * to be modified at runtime.
+ *
+ * Restart the display controller if a start is requested. Sorry for the
+ * flicker. It should be possible to move most of the "DRES-update" bits
+ * setup to driver initialization time and minimize the number of cases
+ * when the display controller will have to be restarted.
+ */
+ if (start) {
+ if (rgrp->used_crtcs++ != 0)
+ __rcar_du_group_start_stop(rgrp, false);
+ __rcar_du_group_start_stop(rgrp, true);
+ } else {
+ if (--rgrp->used_crtcs == 0)
+ __rcar_du_group_start_stop(rgrp, false);
+ }
+}
+
+void rcar_du_group_restart(struct rcar_du_group *rgrp)
+{
+ __rcar_du_group_start_stop(rgrp, false);
+ __rcar_du_group_start_stop(rgrp, true);
+}
+
+static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
+{
+ int ret;
+
+ /* RGB output routing to DPAD0 is configured in the DEFR8 register of
+ * the first group. As this function can be called with the DU0 and DU1
+ * CRTCs disabled, we need to enable the first group clock before
+ * accessing the register.
+ */
+ ret = clk_prepare_enable(rcdu->crtcs[0].clock);
+ if (ret < 0)
+ return ret;
+
+ rcar_du_group_setup_defr8(&rcdu->groups[0]);
+
+ clk_disable_unprepare(rcdu->crtcs[0].clock);
+
+ return 0;
+}
+
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
+{
+ struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
+ u32 dorcr = rcar_du_group_read(rgrp, DORCR);
+
+ dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+
+ /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
+ * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
+ * by default.
+ */
+ if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
+ dorcr |= DORCR_PG2D_DS1;
+ else
+ dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+
+ rcar_du_group_write(rgrp, DORCR, dorcr);
+
+ return rcar_du_set_dpad0_routing(rgrp->dev);
+}
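
rcar_du_group_start_stop() above counts active CRTCs so that the group is reset and restarted when a second CRTC starts (to latch the DRES-gated configuration bits) and is fully stopped only when the last CRTC goes away. A counting sketch of that behaviour, with illustrative names:

#include <stdio.h>

struct group { unsigned int used_crtcs; };

static void hw_start_stop(struct group *g, int start)
{
	(void)g;	/* a real implementation would poke DSYSR here */
	printf("%s\n", start ? "DEN (start)" : "DRES (reset)");
}

static void group_start_stop(struct group *g, int start)
{
	if (start) {
		/* A restart is needed to latch DRES-gated configuration. */
		if (g->used_crtcs++ != 0)
			hw_start_stop(g, 0);
		hw_start_stop(g, 1);
	} else {
		if (--g->used_crtcs == 0)
			hw_start_stop(g, 0);
	}
}

int main(void)
{
	struct group g = { 0 };

	group_start_stop(&g, 1);	/* first CRTC: start only */
	group_start_stop(&g, 1);	/* second CRTC: reset, then start */
	group_start_stop(&g, 0);	/* one CRTC left: keep running */
	group_start_stop(&g, 0);	/* last CRTC gone: reset */
	return 0;
}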
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644
index 00000000000..5025930972e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -0,0 +1,50 @@
+/*
+ * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_GROUP_H__
+#define __RCAR_DU_GROUP_H__
+
+#include "rcar_du_plane.h"
+
+struct rcar_du_device;
+
+/*
+ * struct rcar_du_group - CRTCs and planes group
+ * @dev: the DU device
+ * @mmio_offset: registers offset in the device memory map
+ * @index: group index
+ * @use_count: number of users of the group (rcar_du_group_(get|put))
+ * @used_crtcs: number of CRTCs currently in use
+ * @planes: planes handled by the group
+ */
+struct rcar_du_group {
+ struct rcar_du_device *dev;
+ unsigned int mmio_offset;
+ unsigned int index;
+
+ unsigned int use_count;
+ unsigned int used_crtcs;
+
+ struct rcar_du_planes planes;
+};
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
+
+int rcar_du_group_get(struct rcar_du_group *rgrp);
+void rcar_du_group_put(struct rcar_du_group *rgrp);
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
+void rcar_du_group_restart(struct rcar_du_group *rgrp);
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
+
+#endif /* __RCAR_DU_GROUP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index d30c2e29bee..b31ac080c4a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,10 +19,10 @@
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -106,46 +106,24 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
}
/* -----------------------------------------------------------------------------
- * Common connector and encoder functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- return &rcon->encoder->encoder;
-}
-
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
-}
-
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- rcar_du_crtc_route_output(encoder->crtc, renc->output);
-}
-
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
-}
-
-/* -----------------------------------------------------------------------------
* Frame buffer
*/
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
- /* The pitch must be aligned to a 16 pixels boundary. */
- align = 16 * args->bpp / 8;
+ /* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
+ * but the R8A7790 DU seems to require a 128-byte pitch alignment.
+ */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ align = 128;
+ else
+ align = 16 * args->bpp / 8;
+
args->pitch = roundup(max(args->pitch, min_pitch), align);
return drm_gem_cma_dumb_create(file, dev, args);
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
unsigned int align;
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- align = 16 * format->bpp / 8;
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ align = 128;
+ else
+ align = 16 * format->bpp / 8;
if (mode_cmd->pitches[0] & (align - 1) ||
mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
+static void rcar_du_output_poll_changed(struct drm_device *dev)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+
+ drm_fbdev_cma_hotplug_event(rcdu->fbdev);
+}
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
+ .output_poll_changed = rcar_du_output_poll_changed,
};
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
+ static const unsigned int mmio_offsets[] = {
+ DU0_REG_OFFSET, DU2_REG_OFFSET
+ };
+
struct drm_device *dev = rcdu->ddev;
struct drm_encoder *encoder;
+ struct drm_fbdev_cma *fbdev;
+ unsigned int num_groups;
unsigned int i;
int ret;
- drm_mode_config_init(rcdu->ddev);
+ drm_mode_config_init(dev);
- rcdu->ddev->mode_config.min_width = 0;
- rcdu->ddev->mode_config.min_height = 0;
- rcdu->ddev->mode_config.max_width = 4095;
- rcdu->ddev->mode_config.max_height = 2047;
- rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 4095;
+ dev->mode_config.max_height = 2047;
+ dev->mode_config.funcs = &rcar_du_mode_config_funcs;
- ret = rcar_du_plane_init(rcdu);
- if (ret < 0)
- return ret;
+ rcdu->num_crtcs = rcdu->info->num_crtcs;
+
+ /* Initialize the groups. */
+ num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
+
+ for (i = 0; i < num_groups; ++i) {
+ struct rcar_du_group *rgrp = &rcdu->groups[i];
+
+ rgrp->dev = rcdu;
+ rgrp->mmio_offset = mmio_offsets[i];
+ rgrp->index = i;
+
+ ret = rcar_du_planes_init(rgrp);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create the CRTCs. */
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) {
- ret = rcar_du_crtc_create(rcdu, i);
+ ret = rcar_du_crtc_create(rgrp, i);
if (ret < 0)
return ret;
}
- rcdu->used_crtcs = 0;
- rcdu->num_crtcs = i;
+ /* Initialize the encoders. */
+ ret = rcar_du_lvdsenc_init(rcdu);
+ if (ret < 0)
+ return ret;
for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
const struct rcar_du_encoder_data *pdata =
&rcdu->pdata->encoders[i];
+ const struct rcar_du_output_routing *route =
+ &rcdu->info->routes[pdata->output];
+
+ if (pdata->type == RCAR_DU_ENCODER_UNUSED)
+ continue;
- if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) {
+ if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
+ route->possible_crtcs == 0) {
dev_warn(rcdu->dev,
"encoder %u references unexisting output %u, skipping\n",
i, pdata->output);
continue;
}
- switch (pdata->encoder) {
- case RCAR_DU_ENCODER_VGA:
- rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
- break;
-
- case RCAR_DU_ENCODER_LVDS:
- rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
- break;
-
- default:
- break;
- }
+ rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
}
- /* Set the possible CRTCs and possible clones. All encoders can be
- * driven by the CRTC associated with the output they're connected to,
- * as well as by CRTC 0.
+ /* Set the possible CRTCs and possible clones. There's always at least
+ * one way for all encoders to clone each other, set all bits in the
+ * possible clones field.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ const struct rcar_du_output_routing *route =
+ &rcdu->info->routes[renc->output];
- encoder->possible_crtcs = (1 << 0) | (1 << renc->output);
- encoder->possible_clones = 1 << 0;
+ encoder->possible_crtcs = route->possible_crtcs;
+ encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
}
- ret = rcar_du_plane_register(rcdu);
- if (ret < 0)
- return ret;
+ /* Now that the CRTCs have been initialized register the planes. */
+ for (i = 0; i < num_groups; ++i) {
+ ret = rcar_du_planes_register(&rcdu->groups[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_kms_helper_poll_init(dev);
+
+ drm_helper_disable_unused_functions(dev);
+
+ fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
- drm_kms_helper_poll_init(rcdu->ddev);
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ drm_fbdev_cma_restore_mode(fbdev);
+#endif
- drm_helper_disable_unused_functions(rcdu->ddev);
+ rcdu->fbdev = fbdev;
return 0;
}
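
rcar_du_dumb_create() above widens the pitch alignment from the documented 16 pixels to 128 bytes when the RCAR_DU_FEATURE_ALIGN_128B feature is set. A standalone sketch of the pitch computation, using a hypothetical 1920x1080 XRGB8888 buffer:

#include <stdio.h>

static unsigned int du_pitch(unsigned int width, unsigned int bpp,
			     unsigned int pitch, int align_128b)
{
	unsigned int min_pitch = (width * bpp + 7) / 8;
	unsigned int align = align_128b ? 128 : 16 * bpp / 8;

	if (pitch < min_pitch)
		pitch = min_pitch;
	return (pitch + align - 1) / align * align;	/* roundup() */
}

int main(void)
{
	/* 1920-wide, 32 bpp, 128-byte alignment (R8A7790-style). */
	printf("pitch = %u\n", du_pitch(1920, 32, 0, 1));	/* -> 7680 */
	return 0;
}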
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index dba47226348..5750e6af565 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -16,8 +16,9 @@
#include <linux/types.h>
-#include <drm/drm_crtc.h>
-
+struct drm_file;
+struct drm_device;
+struct drm_mode_create_dumb;
struct rcar_du_device;
struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
unsigned int edf;
};
-struct rcar_du_encoder {
- struct drm_encoder encoder;
- unsigned int output;
-};
-
-#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, encoder)
-
-struct rcar_du_connector {
- struct drm_connector connector;
- struct rcar_du_encoder *encoder;
-};
-
-#define to_rcar_connector(c) \
- container_of(c, struct rcar_du_connector, connector)
-
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
-
int rcar_du_modeset_init(struct rcar_du_device *rcdu);
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1..4f3ba93cd91 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -16,8 +16,9 @@
#include <drm/drm_crtc_helper.h>
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdscon.h"
struct rcar_du_lvds_connector {
struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
#define to_rcar_lvds_connector(c) \
container_of(c, struct rcar_du_lvds_connector, connector.connector)
-/* -----------------------------------------------------------------------------
- * Connector
- */
-
static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
{
- struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector);
+ struct rcar_du_lvds_connector *lvdscon =
+ to_rcar_lvds_connector(connector);
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = rcar_du_lvds_connector_destroy,
};
-static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel)
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ const struct rcar_du_panel_data *panel)
{
struct rcar_du_lvds_connector *lvdscon;
struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return 0;
}
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- const struct drm_display_mode *panel_mode;
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
- bool found = false;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_dbg(dev->dev, "mode_fixup: no connector found\n");
- return false;
- }
-
- if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
- return false;
- }
-
- panel_mode = list_first_entry(&connector->modes,
- struct drm_display_mode, head);
-
- /* We're not allowed to modify the resolution. */
- if (mode->hdisplay != panel_mode->hdisplay ||
- mode->vdisplay != panel_mode->vdisplay)
- return false;
-
- /* The flat panel mode is fixed, just copy it to the adjusted mode. */
- drm_mode_copy(adjusted_mode, panel_mode);
-
- return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_lvds_encoder_dpms,
- .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
- .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_lvds_data *data,
- unsigned int output)
-{
- struct rcar_du_encoder *renc;
- int ret;
-
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
- return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e10..bff8683699c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,5 +1,5 @@
/*
- * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -11,14 +11,15 @@
* (at your option) any later version.
*/
-#ifndef __RCAR_DU_LVDS_H__
-#define __RCAR_DU_LVDS_H__
+#ifndef __RCAR_DU_LVDSCON_H__
+#define __RCAR_DU_LVDSCON_H__
struct rcar_du_device;
-struct rcar_du_encoder_lvds_data;
+struct rcar_du_encoder;
+struct rcar_du_panel_data;
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_lvds_data *data,
- unsigned int output);
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ const struct rcar_du_panel_data *panel);
-#endif /* __RCAR_DU_LVDS_H__ */
+#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644
index 00000000000..a0f6a178192
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -0,0 +1,196 @@
+/*
+ * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_lvds_regs.h"
+
+struct rcar_du_lvdsenc {
+ struct rcar_du_device *dev;
+
+ unsigned int index;
+ void __iomem *mmio;
+ struct clk *clock;
+ int dpms;
+
+ enum rcar_lvds_input input;
+};
+
+static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
+{
+ iowrite32(data, lvds->mmio + reg);
+}
+
+static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+ struct rcar_du_crtc *rcrtc)
+{
+ const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ unsigned int freq = mode->clock;
+ u32 lvdcr0;
+ u32 pllcr;
+ int ret;
+
+ if (lvds->dpms == DRM_MODE_DPMS_ON)
+ return 0;
+
+ ret = clk_prepare_enable(lvds->clock);
+ if (ret < 0)
+ return ret;
+
+ /* PLL clock configuration */
+ if (freq <= 38000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+ else if (freq <= 60000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+ else if (freq <= 121000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+ else
+ pllcr = LVDPLLCR_PLLDLYCNT_150M;
+
+ rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+ /* Hardcode the channel and control signal routing for now.
+ *
+ * HSYNC -> CTRL0
+ * VSYNC -> CTRL1
+ * DISP -> CTRL2
+ * 0 -> CTRL3
+ *
+ * Channels 1 and 3 are switched on ES1.
+ */
+ rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
+ LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
+ LVDCTRCR_CTR0SEL_HSYNC);
+ rcar_lvds_write(lvds, LVDCHCR,
+ LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
+ LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+ /* Select the input, hardcode mode 0, enable LVDS operation and turn
+ * the bias circuitry on.
+ */
+ lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+ if (rcrtc->index == 2)
+ lvdcr0 |= LVDCR0_DUSEL;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
+ LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
+
+ /* Turn the PLL on, wait for the startup delay, and turn the output
+ * on.
+ */
+ lvdcr0 |= LVDCR0_PLLEN;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ usleep_range(100, 150);
+
+ lvdcr0 |= LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ lvds->dpms = DRM_MODE_DPMS_ON;
+ return 0;
+}
+
+static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
+{
+ if (lvds->dpms == DRM_MODE_DPMS_OFF)
+ return;
+
+ rcar_lvds_write(lvds, LVDCR0, 0);
+ rcar_lvds_write(lvds, LVDCR1, 0);
+
+ clk_disable_unprepare(lvds->clock);
+
+ lvds->dpms = DRM_MODE_DPMS_OFF;
+}
+
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode)
+{
+ if (mode == DRM_MODE_DPMS_OFF) {
+ rcar_du_lvdsenc_stop(lvds);
+ return 0;
+ } else if (crtc) {
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ return rcar_du_lvdsenc_start(lvds, rcrtc);
+ } else
+ return -EINVAL;
+}
+
+static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
+ struct platform_device *pdev)
+{
+ struct resource *mem;
+ char name[7];
+
+ sprintf(name, "lvds.%u", lvds->index);
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "failed to get memory resource for %s\n",
+ name);
+ return -EINVAL;
+ }
+
+ lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(lvds->mmio)) {
+ dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
+ name);
+ return PTR_ERR(lvds->mmio);
+ }
+
+ lvds->clock = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(lvds->clock)) {
+ dev_err(&pdev->dev, "failed to get clock for %s\n", name);
+ return PTR_ERR(lvds->clock);
+ }
+
+ return 0;
+}
+
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+ struct platform_device *pdev = to_platform_device(rcdu->dev);
+ struct rcar_du_lvdsenc *lvds;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < rcdu->info->num_lvds; ++i) {
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (lvds == NULL) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ lvds->dev = rcdu;
+ lvds->index = i;
+ lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
+ lvds->dpms = DRM_MODE_DPMS_OFF;
+
+ ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
+ if (ret < 0)
+ return ret;
+
+ rcdu->lvds[i] = lvds;
+ }
+
+ return 0;
+}
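
Note: the PLL delay count selection in rcar_du_lvdsenc_start() above is a pure function of the mode clock (in kHz). A minimal sketch of that mapping as a hypothetical standalone helper (not part of the patch), using the LVDPLLCR_* definitions from rcar_lvds_regs.h later in this diff:

    static u32 rcar_lvds_pllcr_for_clock(unsigned int freq_khz)
    {
            /* Thresholds mirror rcar_du_lvdsenc_start(): pick the PLL
             * delay count for the smallest bucket the clock fits in. */
            if (freq_khz <= 38000)
                    return LVDPLLCR_CEEN | LVDPLLCR_COSEL |
                           LVDPLLCR_PLLDLYCNT_38M;
            if (freq_khz <= 60000)
                    return LVDPLLCR_CEEN | LVDPLLCR_COSEL |
                           LVDPLLCR_PLLDLYCNT_60M;
            if (freq_khz <= 121000)
                    return LVDPLLCR_CEEN | LVDPLLCR_COSEL |
                           LVDPLLCR_PLLDLYCNT_121M;
            return LVDPLLCR_PLLDLYCNT_150M;
    }
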
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644
index 00000000000..7051c6de19a
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -0,0 +1,46 @@
+/*
+ * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_LVDSENC_H__
+#define __RCAR_DU_LVDSENC_H__
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/rcar-du.h>
+
+struct rcar_drm_crtc;
+struct rcar_du_lvdsenc;
+
+enum rcar_lvds_input {
+ RCAR_LVDS_INPUT_DU0,
+ RCAR_LVDS_INPUT_DU1,
+ RCAR_LVDS_INPUT_DU2,
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode);
+#else
+static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+ return 0;
+}
+static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode)
+{
+ return 0;
+}
+#endif
+
+#endif /* __RCAR_DU_LVDSENC_H__ */
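
The #if IS_ENABLED(CONFIG_DRM_RCAR_LVDS) block above follows the usual kernel pattern for optional features: when the option is off, callers link against static inline no-ops instead of growing #ifdefs of their own. A hypothetical caller (illustrative only, not from the patch) compiles unchanged either way:

    static int example_lvds_power_on(struct rcar_du_device *rcdu,
                                     struct rcar_du_lvdsenc *lvds,
                                     struct drm_crtc *crtc)
    {
            int ret;

            /* Becomes a no-op stub returning 0 when CONFIG_DRM_RCAR_LVDS=n. */
            ret = rcar_du_lvdsenc_init(rcdu);
            if (ret < 0)
                    return ret;

            return rcar_du_lvdsenc_dpms(lvds, crtc, DRM_MODE_DPMS_ON);
    }
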
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a65f81ddf51..53000644733 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
}
-static u32 rcar_du_plane_read(struct rcar_du_device *rcdu,
+static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
unsigned int index, u32 reg)
{
- return rcar_du_read(rcdu, index * PLANE_OFF + reg);
+ return rcar_du_read(rgrp->dev,
+ rgrp->mmio_offset + index * PLANE_OFF + reg);
}
-static void rcar_du_plane_write(struct rcar_du_device *rcdu,
+static void rcar_du_plane_write(struct rcar_du_group *rgrp,
unsigned int index, u32 reg, u32 data)
{
- rcar_du_write(rcdu, index * PLANE_OFF + reg, data);
+ rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
+ data);
}
int rcar_du_plane_reserve(struct rcar_du_plane *plane,
const struct rcar_du_format_info *format)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
unsigned int i;
int ret = -EBUSY;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rgrp->planes.lock);
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- if (!(rcdu->planes.free & (1 << i)))
+ for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
+ if (!(rgrp->planes.free & (1 << i)))
continue;
if (format->planes == 1 ||
- rcdu->planes.free & (1 << ((i + 1) % 8)))
+ rgrp->planes.free & (1 << ((i + 1) % 8)))
break;
}
- if (i == ARRAY_SIZE(rcdu->planes.planes))
+ if (i == ARRAY_SIZE(rgrp->planes.planes))
goto done;
- rcdu->planes.free &= ~(1 << i);
+ rgrp->planes.free &= ~(1 << i);
if (format->planes == 2)
- rcdu->planes.free &= ~(1 << ((i + 1) % 8));
+ rgrp->planes.free &= ~(1 << ((i + 1) % 8));
plane->hwindex = i;
ret = 0;
done:
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rgrp->planes.lock);
return ret;
}
void rcar_du_plane_release(struct rcar_du_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
if (plane->hwindex == -1)
return;
- mutex_lock(&rcdu->planes.lock);
- rcdu->planes.free |= 1 << plane->hwindex;
+ mutex_lock(&rgrp->planes.lock);
+ rgrp->planes.free |= 1 << plane->hwindex;
if (plane->format->planes == 2)
- rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8);
- mutex_unlock(&rcdu->planes.lock);
+ rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
+ mutex_unlock(&rgrp->planes.lock);
plane->hwindex = -1;
}
void rcar_du_plane_update_base(struct rcar_du_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
- /* According to the datasheet the Y position is expressed in raster line
- * units. However, 32bpp formats seem to require a doubled Y position
- * value. Similarly, for the second plane, NV12 and NV21 formats seem to
+ /* The Y position is expressed in raster line units and must be doubled
+ * for 32bpp formats, according to the R8A7790 datasheet. No mention of
+ * doubling the Y position is found in the R8A7779 datasheet, but the
+ * rule seems to apply there as well.
+ *
+ * Similarly, for the second plane, NV12 and NV21 formats seem to
* require a halved Y position value.
*/
- rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+ rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 32 ? 2 : 1));
- rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
if (plane->format->planes == 2) {
index = (index + 1) % 8;
- rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+ rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 16 ? 2 : 1) / 2);
- rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
}
}
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
unsigned int index)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
u32 colorkey;
u32 pnmr;
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
* enable alpha-blending regardless of the X bit value.
*/
if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
- rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0);
+ rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
else
- rcar_du_plane_write(rcdu, index, PnALPHAR,
+ rcar_du_plane_write(rgrp, index, PnALPHAR,
PnALPHAR_ABIT_X | plane->alpha);
pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
if (plane->format->fourcc == DRM_FORMAT_YUYV)
pnmr |= PnMR_YCDF_YUYV;
- rcar_du_plane_write(rcdu, index, PnMR, pnmr);
+ rcar_du_plane_write(rgrp, index, PnMR, pnmr);
switch (plane->format->fourcc) {
case DRM_FORMAT_RGB565:
colorkey = ((plane->colorkey & 0xf80000) >> 8)
| ((plane->colorkey & 0x00fc00) >> 5)
| ((plane->colorkey & 0x0000f8) >> 3);
- rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+ rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
colorkey = ((plane->colorkey & 0xf80000) >> 9)
| ((plane->colorkey & 0x00f800) >> 6)
| ((plane->colorkey & 0x0000f8) >> 3);
- rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+ rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- rcar_du_plane_write(rcdu, index, PnTC3R,
+ rcar_du_plane_write(rgrp, index, PnTC3R,
PnTC3R_CODE | (plane->colorkey & 0xffffff));
break;
}
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
unsigned int index)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
*/
- ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4);
+ ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
ddcr4 &= ~PnDDCR4_EDF_MASK;
ddcr4 |= plane->format->edf | PnDDCR4_CODE;
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
}
}
- rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2);
- rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4);
+ rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+ rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
/* Memory pitch (expressed in pixels) */
if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
else
mwr = plane->pitch * 8 / plane->format->bpp;
- rcar_du_plane_write(rcdu, index, PnMWR, mwr);
+ rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* Destination position and size */
- rcar_du_plane_write(rcdu, index, PnDSXR, plane->width);
- rcar_du_plane_write(rcdu, index, PnDSYR, plane->height);
- rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x);
- rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y);
+ rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
+ rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
+ rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
+ rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
/* Wrap-around and blinking, disabled */
- rcar_du_plane_write(rcdu, index, PnWASPR, 0);
- rcar_du_plane_write(rcdu, index, PnWAMWR, 4095);
- rcar_du_plane_write(rcdu, index, PnBTR, 0);
- rcar_du_plane_write(rcdu, index, PnMLR, 0);
+ rcar_du_plane_write(rgrp, index, PnWASPR, 0);
+ rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
+ rcar_du_plane_write(rgrp, index, PnBTR, 0);
+ rcar_du_plane_write(rgrp, index, PnMLR, 0);
}
void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct rcar_du_plane *rplane = to_rcar_plane(plane);
- struct rcar_du_device *rcdu = plane->dev->dev_private;
+ struct rcar_du_device *rcdu = rplane->group->dev;
const struct rcar_du_format_info *format;
unsigned int nplanes;
int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
rcar_du_plane_compute_base(rplane, fb);
rcar_du_plane_setup(rplane);
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rplane->group->planes.lock);
rplane->enabled = true;
rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rplane->group->planes.lock);
return 0;
}
static int rcar_du_plane_disable(struct drm_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev->dev_private;
struct rcar_du_plane *rplane = to_rcar_plane(plane);
if (!rplane->enabled)
return 0;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rplane->group->planes.lock);
rplane->enabled = false;
rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rplane->group->planes.lock);
rcar_du_plane_release(rplane);
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
unsigned int zpos)
{
- struct rcar_du_device *rcdu = plane->dev;
-
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&plane->group->planes.lock);
if (plane->zpos == zpos)
goto done;
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
rcar_du_crtc_update_planes(plane->crtc);
done:
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&plane->group->planes.lock);
}
static int rcar_du_plane_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
- struct rcar_du_device *rcdu = plane->dev->dev_private;
struct rcar_du_plane *rplane = to_rcar_plane(plane);
+ struct rcar_du_group *rgrp = rplane->group;
- if (property == rcdu->planes.alpha)
+ if (property == rgrp->planes.alpha)
rcar_du_plane_set_alpha(rplane, value);
- else if (property == rcdu->planes.colorkey)
+ else if (property == rgrp->planes.colorkey)
rcar_du_plane_set_colorkey(rplane, value);
- else if (property == rcdu->planes.zpos)
+ else if (property == rgrp->planes.zpos)
rcar_du_plane_set_zpos(rplane, value);
else
return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV16,
};
-int rcar_du_plane_init(struct rcar_du_device *rcdu)
+int rcar_du_planes_init(struct rcar_du_group *rgrp)
{
+ struct rcar_du_planes *planes = &rgrp->planes;
+ struct rcar_du_device *rcdu = rgrp->dev;
unsigned int i;
- mutex_init(&rcdu->planes.lock);
- rcdu->planes.free = 0xff;
+ mutex_init(&planes->lock);
+ planes->free = 0xff;
- rcdu->planes.alpha =
+ planes->alpha =
drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
- if (rcdu->planes.alpha == NULL)
+ if (planes->alpha == NULL)
return -ENOMEM;
/* The color key is expressed as an RGB888 triplet stored in a 32-bit
* integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
* or enable (1) source color keying.
*/
- rcdu->planes.colorkey =
+ planes->colorkey =
drm_property_create_range(rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
- if (rcdu->planes.colorkey == NULL)
+ if (planes->colorkey == NULL)
return -ENOMEM;
- rcdu->planes.zpos =
+ planes->zpos =
drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
- if (rcdu->planes.zpos == NULL)
+ if (planes->zpos == NULL)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
+ struct rcar_du_plane *plane = &planes->planes[i];
- plane->dev = rcdu;
+ plane->group = rgrp;
plane->hwindex = -1;
plane->alpha = 255;
plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
return 0;
}
-int rcar_du_plane_register(struct rcar_du_device *rcdu)
+int rcar_du_planes_register(struct rcar_du_group *rgrp)
{
+ struct rcar_du_planes *planes = &rgrp->planes;
+ struct rcar_du_device *rcdu = rgrp->dev;
+ unsigned int crtcs;
unsigned int i;
int ret;
+ crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
+
for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
struct rcar_du_kms_plane *plane;
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
if (plane == NULL)
return -ENOMEM;
- plane->hwplane = &rcdu->planes.planes[i + 2];
+ plane->hwplane = &planes->planes[i + 2];
plane->hwplane->zpos = 1;
- ret = drm_plane_init(rcdu->ddev, &plane->plane,
- (1 << rcdu->num_crtcs) - 1,
+ ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_plane_funcs, formats,
ARRAY_SIZE(formats), false);
if (ret < 0)
return ret;
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.alpha, 255);
+ planes->alpha, 255);
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.colorkey,
+ planes->colorkey,
RCAR_DU_COLORKEY_NONE);
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.zpos, 1);
+ planes->zpos, 1);
}
return 0;
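
The new crtcs mask in rcar_du_planes_register() above restricts each group's KMS planes to the two CRTCs that group drives, clamped to the CRTCs that actually exist. A worked sketch of the expression (hypothetical helper, values for illustration):

    static unsigned int rgrp_crtc_mask(unsigned int num_crtcs,
                                       unsigned int group_index)
    {
            /* Two consecutive CRTC bits per group, masked by the
             * bitmask of CRTCs that exist on this SoC. */
            return ((1U << num_crtcs) - 1) & (3U << (2 * group_index));
    }

    /* e.g. num_crtcs = 3: group 0 -> 0x3 (CRTCs 0 and 1),
     *                     group 1 -> 0x4 (CRTC 2 only).   */
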
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5397dba2fe5..f94f9ce8499 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,10 +14,13 @@
#ifndef __RCAR_DU_PLANE_H__
#define __RCAR_DU_PLANE_H__
-struct drm_crtc;
-struct drm_framebuffer;
-struct rcar_du_device;
+#include <linux/mutex.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
struct rcar_du_format_info;
+struct rcar_du_group;
/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
* using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
#define RCAR_DU_NUM_SW_PLANES 9
struct rcar_du_plane {
- struct rcar_du_device *dev;
+ struct rcar_du_group *group;
struct drm_crtc *crtc;
bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
unsigned int dst_y;
};
-int rcar_du_plane_init(struct rcar_du_device *rcdu);
-int rcar_du_plane_register(struct rcar_du_device *rcdu);
+struct rcar_du_planes {
+ struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
+ unsigned int free;
+ struct mutex lock;
+
+ struct drm_property *alpha;
+ struct drm_property *colorkey;
+ struct drm_property *zpos;
+};
+
+int rcar_du_planes_init(struct rcar_du_group *rgrp);
+int rcar_du_planes_register(struct rcar_du_group *rgrp);
+
void rcar_du_plane_setup(struct rcar_du_plane *plane);
void rcar_du_plane_update_base(struct rcar_du_plane *plane);
void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
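
For reference, the reservation rule implemented by rcar_du_plane_reserve() earlier in this diff amounts to a search over an 8-bit free mask. A minimal standalone sketch (hypothetical helper, not from the patch):

    static int find_free_hw_plane(u8 free_mask, unsigned int num_planes)
    {
            unsigned int i;

            for (i = 0; i < 8; i++) {
                    if (!(free_mask & (1 << i)))
                            continue;
                    /* Two-plane formats (NV12/NV21/NV16) also need the
                     * next hardware plane, wrapping around at 8, since
                     * the chroma plane always uses slot (i + 1) % 8. */
                    if (num_planes == 1 ||
                        (free_mask & (1 << ((i + 1) % 8))))
                            return i;
            }

            return -1; /* no suitable plane (pair) available */
    }
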
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 69f21f19b51..73f7347f740 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -13,14 +13,15 @@
#ifndef __RCAR_DU_REGS_H__
#define __RCAR_DU_REGS_H__
-#define DISP2_REG_OFFSET 0x30000
+#define DU0_REG_OFFSET 0x00000
+#define DU1_REG_OFFSET 0x30000
+#define DU2_REG_OFFSET 0x40000
/* -----------------------------------------------------------------------------
* Display Control Registers
*/
#define DSYSR 0x00000 /* display 1 */
-#define D2SYSR 0x30000 /* display 2 */
#define DSYSR_ILTS (1 << 29)
#define DSYSR_DSEC (1 << 20)
#define DSYSR_IUPD (1 << 16)
@@ -35,7 +36,6 @@
#define DSYSR_SCM_INT_VIDEO (3 << 4)
#define DSMR 0x00004
-#define D2SMR 0x30004
#define DSMR_VSPM (1 << 28)
#define DSMR_ODPM (1 << 27)
#define DSMR_DIPM_DISP (0 << 25)
@@ -60,7 +60,6 @@
#define DSMR_CSY_MASK (3 << 6)
#define DSSR 0x00008
-#define D2SSR 0x30008
#define DSSR_VC1FB_DSA0 (0 << 30)
#define DSSR_VC1FB_DSA1 (1 << 30)
#define DSSR_VC1FB_DSA2 (2 << 30)
@@ -80,7 +79,6 @@
#define DSSR_ADC(n) (1 << ((n)-1))
#define DSRCR 0x0000c
-#define D2SRCR 0x3000c
#define DSRCR_TVCL (1 << 15)
#define DSRCR_FRCL (1 << 14)
#define DSRCR_VBCL (1 << 11)
@@ -90,7 +88,6 @@
#define DSRCR_MASK 0x0000cbff
#define DIER 0x00010
-#define D2IER 0x30010
#define DIER_TVE (1 << 15)
#define DIER_FRE (1 << 14)
#define DIER_VBE (1 << 11)
@@ -114,7 +111,6 @@
#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
#define DEFR 0x00020
-#define D2EFR 0x30020
#define DEFR_CODE (0x7773 << 16)
#define DEFR_EXSL (1 << 12)
#define DEFR_EXVL (1 << 11)
@@ -137,12 +133,10 @@
#define DCPCR_DCE (1 << 0)
#define DEFR2 0x00034
-#define D2EFR2 0x30034
#define DEFR2_CODE (0x7775 << 16)
#define DEFR2_DEFE2G (1 << 0)
#define DEFR3 0x00038
-#define D2EFR3 0x30038
#define DEFR3_CODE (0x7776 << 16)
#define DEFR3_EVDA (1 << 14)
#define DEFR3_EVDM_1 (1 << 12)
@@ -153,7 +147,6 @@
#define DEFR3_DEFE3 (1 << 0)
#define DEFR4 0x0003c
-#define D2EFR4 0x3003c
#define DEFR4_CODE (0x7777 << 16)
#define DEFR4_LRUO (1 << 5)
#define DEFR4_SPCE (1 << 4)
@@ -205,6 +198,68 @@
#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
/* -----------------------------------------------------------------------------
+ * R8A7790-only Control Registers
+ */
+
+#define DD1SSR 0x20008
+#define DD1SSR_TVR (1 << 15)
+#define DD1SSR_FRM (1 << 14)
+#define DD1SSR_BUF (1 << 12)
+#define DD1SSR_VBK (1 << 11)
+#define DD1SSR_RINT (1 << 9)
+#define DD1SSR_HBK (1 << 8)
+#define DD1SSR_ADC(n) (1 << ((n)-1))
+
+#define DD1SRCR 0x2000c
+#define DD1SRCR_TVR (1 << 15)
+#define DD1SRCR_FRM (1 << 14)
+#define DD1SRCR_BUF (1 << 12)
+#define DD1SRCR_VBK (1 << 11)
+#define DD1SRCR_RINT (1 << 9)
+#define DD1SRCR_HBK (1 << 8)
+#define DD1SRCR_ADC(n) (1 << ((n)-1))
+
+#define DD1IER 0x20010
+#define DD1IER_TVR (1 << 15)
+#define DD1IER_FRM (1 << 14)
+#define DD1IER_BUF (1 << 12)
+#define DD1IER_VBK (1 << 11)
+#define DD1IER_RINT (1 << 9)
+#define DD1IER_HBK (1 << 8)
+#define DD1IER_ADC(n) (1 << ((n)-1))
+
+#define DEFR8 0x20020
+#define DEFR8_CODE (0x7790 << 16)
+#define DEFR8_VSCS (1 << 6)
+#define DEFR8_DRGBS_DU(n) ((n) << 4)
+#define DEFR8_DRGBS_MASK (3 << 4)
+#define DEFR8_DEFE8 (1 << 0)
+
+#define DOFLR 0x20024
+#define DOFLR_CODE (0x7790 << 16)
+#define DOFLR_HSYCFL1 (1 << 13)
+#define DOFLR_VSYCFL1 (1 << 12)
+#define DOFLR_ODDFL1 (1 << 11)
+#define DOFLR_DISPFL1 (1 << 10)
+#define DOFLR_CDEFL1 (1 << 9)
+#define DOFLR_RGBFL1 (1 << 8)
+#define DOFLR_HSYCFL0 (1 << 5)
+#define DOFLR_VSYCFL0 (1 << 4)
+#define DOFLR_ODDFL0 (1 << 3)
+#define DOFLR_DISPFL0 (1 << 2)
+#define DOFLR_CDEFL0 (1 << 1)
+#define DOFLR_RGBFL0 (1 << 0)
+
+#define DIDSR 0x20028
+#define DIDSR_CODE (0x7790 << 16)
+#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
+#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
+#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
+#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
+
+/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
*/
@@ -349,21 +404,34 @@
#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
#define APnMWR 0x0a104
+
+#define APnDSXR 0x0a110
+#define APnDSYR 0x0a114
+#define APnDPXR 0x0a118
+#define APnDPYR 0x0a11c
+
#define APnDSA0R 0x0a120
#define APnDSA1R 0x0a124
#define APnDSA2R 0x0a128
+
+#define APnSPXR 0x0a130
+#define APnSPYR 0x0a134
+#define APnWASPR 0x0a138
+#define APnWAMWR 0x0a13c
+
+#define APnBTR 0x0a140
+
#define APnMLR 0x0a150
+#define APnSWAPR 0x0a180
/* -----------------------------------------------------------------------------
* Display Capture Registers
*/
+#define DCMR 0x0c100
#define DCMWR 0x0c104
-#define DC2MWR 0x0c204
#define DCSAR 0x0c120
-#define DC2SAR 0x0c220
#define DCMLR 0x0c150
-#define DC2MLR 0x0c250
/* -----------------------------------------------------------------------------
* Color Palette Registers
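
The new DIDSR macros pack one two-bit dot-clock source field per DU channel starting at bit 8, plus a magic code in the upper half-word. A sketch of a read-modify-write using them (hypothetical helper; assumes DIDSR follows the same write-code convention as the DEFR-style registers):

    static u32 didsr_route_lvds0(u32 didsr, unsigned int ch)
    {
            didsr &= ~DIDSR_LCDS_MASK(ch);  /* clear the 2-bit source field */
            didsr |= DIDSR_LCDS_LVDS0(ch);  /* select LVDS0 as dot clock */
            return didsr | DIDSR_CODE;      /* required write code */
    }
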
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380..41d563adfea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -16,12 +16,9 @@
#include <drm/drm_crtc_helper.h>
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_vga.h"
-
-/* -----------------------------------------------------------------------------
- * Connector
- */
+#include "rcar_du_vgacon.h"
static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
{
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
static enum drm_connector_status
rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_unknown;
+ return connector_status_connected;
}
static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = rcar_du_vga_connector_destroy,
};
-static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
{
struct rcar_du_connector *rcon;
struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return 0;
}
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_vga_encoder_dpms,
- .mode_fixup = rcar_du_vga_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
- .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_vga_data *data,
- unsigned int output)
-{
- struct rcar_du_encoder *renc;
- int ret;
-
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
- return rcar_du_vga_connector_init(rcdu, renc);
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190..b12b0cf7f11 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,5 +1,5 @@
/*
- * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -11,14 +11,13 @@
* (at your option) any later version.
*/
-#ifndef __RCAR_DU_VGA_H__
-#define __RCAR_DU_VGA_H__
+#ifndef __RCAR_DU_VGACON_H__
+#define __RCAR_DU_VGACON_H__
struct rcar_du_device;
-struct rcar_du_encoder_vga_data;
+struct rcar_du_encoder;
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_vga_data *data,
- unsigned int output);
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc);
-#endif /* __RCAR_DU_VGA_H__ */
+#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644
index 00000000000..77cf9289ab6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -0,0 +1,69 @@
+/*
+ * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RCAR_LVDS_REGS_H__
+#define __RCAR_LVDS_REGS_H__
+
+#define LVDCR0 0x0000
+#define LVDCR0_DUSEL (1 << 15)
+#define LVDCR0_DMD (1 << 12)
+#define LVDCR0_LVMD_MASK (0xf << 8)
+#define LVDCR0_LVMD_SHIFT 8
+#define LVDCR0_PLLEN (1 << 4)
+#define LVDCR0_BEN (1 << 2)
+#define LVDCR0_LVEN (1 << 1)
+#define LVDCR0_LVRES (1 << 0)
+
+#define LVDCR1 0x0004
+#define LVDCR1_CKSEL (1 << 15)
+#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
+#define LVDCR1_CLKSTBY (3 << 0)
+
+#define LVDPLLCR 0x0008
+#define LVDPLLCR_CEEN (1 << 14)
+#define LVDPLLCR_FBEN (1 << 13)
+#define LVDPLLCR_COSEL (1 << 12)
+#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
+#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
+#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
+#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
+#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
+
+#define LVDCTRCR 0x000c
+#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
+#define LVDCTRCR_CTR3SEL_ODD (1 << 12)
+#define LVDCTRCR_CTR3SEL_CDE (2 << 12)
+#define LVDCTRCR_CTR3SEL_MASK (7 << 12)
+#define LVDCTRCR_CTR2SEL_DISP (0 << 8)
+#define LVDCTRCR_CTR2SEL_ODD (1 << 8)
+#define LVDCTRCR_CTR2SEL_CDE (2 << 8)
+#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8)
+#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8)
+#define LVDCTRCR_CTR2SEL_MASK (7 << 8)
+#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4)
+#define LVDCTRCR_CTR1SEL_DISP (1 << 4)
+#define LVDCTRCR_CTR1SEL_ODD (2 << 4)
+#define LVDCTRCR_CTR1SEL_CDE (3 << 4)
+#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4)
+#define LVDCTRCR_CTR1SEL_MASK (7 << 4)
+#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0)
+#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0)
+#define LVDCTRCR_CTR0SEL_DISP (2 << 0)
+#define LVDCTRCR_CTR0SEL_ODD (3 << 0)
+#define LVDCTRCR_CTR0SEL_CDE (4 << 0)
+#define LVDCTRCR_CTR0SEL_MASK (7 << 0)
+
+#define LVDCHCR 0x0010
+#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4))
+#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4))
+
+#endif /* __RCAR_LVDS_REGS_H__ */
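
Worked example for LVDCHCR_CHSEL_CH(n, c): output channel n stores its input channel as the offset (c - n) mod 4 in bits [4n+1:4n]. The ES1 channel swap programmed in rcar_du_lvdsenc_start() earlier in this diff therefore encodes as:

    /* LVDCHCR_CHSEL_CH(0, 0) -> (0 - 0) & 3 = 0 at bits  1:0
     * LVDCHCR_CHSEL_CH(1, 3) -> (3 - 1) & 3 = 2 at bits  5:4
     * LVDCHCR_CHSEL_CH(2, 2) -> (2 - 2) & 3 = 0 at bits  9:8
     * LVDCHCR_CHSEL_CH(3, 1) -> (1 - 3) & 3 = 2 at bits 13:12
     */
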
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d..b17d0710871 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
drm_idlelock_release(&file_priv->master->lock);
}
-struct drm_ioctl_desc savage_ioctls[] = {
+const struct drm_ioctl_desc savage_ioctls[] = {
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 71b2081e783..3c030216e88 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+ DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.load = savage_driver_load,
.firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6..335f8fcf104 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
S3_LAST
};
-extern struct drm_ioctl_desc savage_ioctls[];
+extern const struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 99e2034e49c..54bad98e947 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct drm_device *dev = scrtc->crtc.dev;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef5..015551866b4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 5a5325e6b75..4383b74a3aa 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_USE_AGP,
.load = sis_driver_load,
.unload = sis_driver_unload,
.open = sis_driver_open,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83b..c31c0253054 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void sis_lastclose(struct drm_device *dev);
-extern struct drm_ioctl_desc sis_ioctls[];
+extern const struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e500..01857d83635 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (pool == AGP_TYPE) {
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
} else {
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
#else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
#endif
}
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
return;
}
-struct drm_ioctl_desc sis_ioctls[] = {
+const struct drm_ioctl_desc sis_ioctls[] = {
DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ddfa743459d..3492ca5c46d 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_MTRR,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d..d36efc13b16 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
struct drm_framebuffer *scanout[2];
/* for deferred fb unref's: */
- DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
- struct work_struct work;
+ struct drm_flip_work unref_work;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
-static void unref_worker(struct work_struct *work)
+static void unref_worker(struct drm_flip_work *work, void *val)
{
struct tilcdc_crtc *tilcdc_crtc =
- container_of(work, struct tilcdc_crtc, work);
+ container_of(work, struct tilcdc_crtc, unref_work);
struct drm_device *dev = tilcdc_crtc->base.dev;
- struct drm_framebuffer *fb;
mutex_lock(&dev->mode_config.mutex);
- while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_unreference(val);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
};
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
pm_runtime_get_sync(dev->dev);
tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
if (tilcdc_crtc->scanout[n]) {
- if (kfifo_put(&tilcdc_crtc->unref_fifo,
- (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
- struct tilcdc_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &tilcdc_crtc->work);
- } else {
- dev_err(dev->dev, "unref fifo full!\n");
- drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
- }
+ drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
}
tilcdc_crtc->scanout[n] = crtc->fb;
drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,14 +141,15 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
drm_crtc_cleanup(crtc);
- WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
- kfifo_free(&tilcdc_crtc->unref_fifo);
+ drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+
kfree(tilcdc_crtc);
}
static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -379,7 +372,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ /*
+ * Use the value from adjusted_mode here, as it may have been
+ * changed by the slave-encoder fixup to work around the issue
+ * that tilcdc timings are not VESA compliant.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +664,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+ ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+ "unref", unref_worker);
if (ret) {
dev_err(dev->dev, "could not allocate unref FIFO\n");
goto fail;
}
- INIT_WORK(&tilcdc_crtc->work, unref_worker);
-
ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
if (ret < 0)
goto fail;
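
The tilcdc conversion above replaces an open-coded kfifo plus work_struct with the new drm_flip_work helper (drm_flip_work.h, introduced alongside this series). The pattern is: init with a drop callback, queue objects from any context, then commit to schedule the worker. A minimal sketch, with fb and wq standing in for the driver's framebuffer and workqueue:

    static void defer_fb_unref(struct drm_flip_work *unref_work,
                               struct workqueue_struct *wq,
                               struct drm_framebuffer *fb)
    {
            drm_flip_work_queue(unref_work, fb);  /* stash the object */
            drm_flip_work_commit(unref_work, wq); /* run drop func on wq */
    }
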
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a21..116da199b94 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
.debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index a19f657dfa5..595068ba2d5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -72,13 +72,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
}
+static bool slave_encoder_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * tilcdc does not generate VESA-compliant sync: it aligns VS to
+ * the second edge of HS instead of the first. We use adjusted_mode
+ * to fix the sync up by aligning both rising edges, and add an
+ * HSKEW offset to let the slave encoder correct for it.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+
static const struct drm_encoder_funcs slave_encoder_funcs = {
.destroy = slave_encoder_destroy,
};
static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
.dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_fixup = slave_encoder_fixup,
.prepare = slave_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.mode_set = drm_i2c_encoder_mode_set,
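
To make slave_encoder_fixup() concrete, take a hypothetical 1280x720 mode (illustrative timing numbers, not from the patch):

    /* mode->hsync_start = 1390, mode->hsync_end = 1430
     *   adjusted_mode->hskew = 1430 - 1390 = 40  (the HS pulse width)
     *   flags: NHSYNC cleared, PHSYNC and DRM_MODE_FLAG_HSKEW set
     * The slave encoder can then shift VS back by hskew pixels. */
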
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670..f1a857ec102 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- write_lock(&bdev->vm_lock);
- if (likely(bo->vm_node != NULL)) {
- rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
- drm_mm_put_block(bo->vm_node);
- bo->vm_node = NULL;
- }
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->resv = &bo->ttm_resv;
reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
ret = ttm_bo_check_placement(bo, placement);
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
if (likely(!ret) &&
(bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg))
- ret = ttm_bo_setup_vm(bo);
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
locked = ww_mutex_trylock(&bo->resv->lock);
WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
TTM_DEBUG("Swap list was clean\n");
spin_unlock(&glob->lru_lock);
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_manager_destroy(&bdev->vma_manager);
return ret;
}
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
{
int ret = -EINVAL;
- rwlock_init(&bdev->vm_lock);
bdev->driver = driver;
memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- bdev->addr_space_rb = RB_ROOT;
- drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+ drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+ 0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- loff_t offset = (loff_t) bo->addr_space_offset;
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
- if (!bdev->dev_mapping)
- return;
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct rb_node **cur = &bdev->addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct ttm_buffer_object *cur_bo;
- unsigned long offset = bo->vm_node->start;
- unsigned long cur_offset;
-
- while (*cur) {
- parent = *cur;
- cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
- cur_offset = cur_bo->vm_node->start;
- if (offset < cur_offset)
- cur = &parent->rb_left;
- else if (offset > cur_offset)
- cur = &parent->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&bo->vm_rb, parent, cur);
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
-retry_pre_get:
- ret = drm_mm_pre_get(&bdev->addr_space_mm);
- if (unlikely(ret != 0))
- return ret;
-
- write_lock(&bdev->vm_lock);
- bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
- bo->mem.num_pages, 0, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
- bo->mem.num_pages, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- write_unlock(&bdev->vm_lock);
- goto retry_pre_get;
- }
-
- ttm_bo_vm_insert_rb(bo);
- write_unlock(&bdev->vm_lock);
- bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
- return 0;
-out_unlock:
- write_unlock(&bdev->vm_lock);
- return ret;
-}
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
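
The TTM changes above drop the per-device rb-tree and drm_mm address-space allocator in favour of the shared drm_vma_manager. A minimal sketch of the node lifecycle against that API (a hypothetical condensed example; names as used in the hunks, error handling trimmed):

    static int example_bo_mmap_offset(struct ttm_bo_device *bdev,
                                      struct ttm_buffer_object *bo)
    {
            int ret;

            drm_vma_node_reset(&bo->vma_node);
            ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
                                     bo->mem.num_pages);
            if (ret)
                    return ret;

            /* User space mmap()s the BO at this byte offset. */
            pr_debug("offset %llu\n", (unsigned long long)
                     drm_vma_node_offset_addr(&bo->vma_node));

            drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
            return 0;
    }
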
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472..c58eba33bd5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
- do {
- ret = drm_mm_pre_get(mm);
- if (unlikely(ret))
- return ret;
- spin_lock(&rman->lock);
- node = drm_mm_search_free_in_range(mm,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(node == NULL)) {
- spin_unlock(&rman->lock);
- return 0;
- }
- node = drm_mm_get_block_atomic_range(node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&rman->lock);
- } while (node == NULL);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock(&rman->lock);
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn, lpfn,
+ DRM_MM_SEARCH_BEST);
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
- mem->mm_node = node;
- mem->start = node->start;
return 0;
}
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&rman->lock);
- drm_mm_put_block(mem->mm_node);
+ drm_mm_remove_node(mem->mm_node);
spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
mem->mm_node = NULL;
}
}
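
The ttm_bo_man_get_node() rewrite reflects the new drm_mm contract: nodes are caller-allocated, so the drm_mm_pre_get() retry loop disappears and the allocation can sleep outside rman->lock. In sketch form (names as in the hunk above; the release path pairs drm_mm_remove_node() with kfree()):

    node = kzalloc(sizeof(*node), GFP_KERNEL); /* may sleep; lock not held */
    if (!node)
            return -ENOMEM;

    spin_lock(&rman->lock);
    ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
                                      mem->page_alignment,
                                      placement->fpfn, lpfn,
                                      DRM_MM_SEARCH_BEST);
    spin_unlock(&rman->lock);

    if (ret)
            kfree(node); /* caller owns the node memory on both paths */
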
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5..7cc904d3a4d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
- fbo->vm_node = NULL;
+ drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041..1006c15445e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
@@ -40,37 +41,6 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
- unsigned long page_start,
- unsigned long num_pages)
-{
- struct rb_node *cur = bdev->addr_space_rb.rb_node;
- unsigned long cur_offset;
- struct ttm_buffer_object *bo;
- struct ttm_buffer_object *best_bo = NULL;
-
- while (likely(cur != NULL)) {
- bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
- cur_offset = bo->vm_node->start;
- if (page_start >= cur_offset) {
- cur = cur->rb_right;
- best_bo = bo;
- if (page_start == cur_offset)
- break;
- } else
- cur = cur->rb_left;
- }
-
- if (unlikely(best_bo == NULL))
- return NULL;
-
- if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
- (page_start + num_pages)))
- return NULL;
-
- return best_bo;
-}
-
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
page_last = vma_pages(vma) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.close = ttm_bo_vm_close
};
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+ unsigned long offset,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+ struct ttm_buffer_object *bo = NULL;
+
+ drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+ node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ if (likely(node)) {
+ bo = container_of(node, struct ttm_buffer_object, vma_node);
+ if (!kref_get_unless_zero(&bo->kref))
+ bo = NULL;
+ }
+
+ drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+ if (!bo)
+ pr_err("Could not find buffer object to map\n");
+
+ return bo;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
- vma_pages(vma));
- if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
- bo = NULL;
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL)) {
- pr_err("Could not find buffer object to map\n");
+ bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+ if (unlikely(!bo))
return -EINVAL;
- }
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
-
-
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf, size_t count,
- loff_t *f_pos, bool write)
-{
- struct ttm_buffer_object *bo;
- struct ttm_bo_driver *driver;
- struct ttm_bo_kmap_obj map;
- unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL))
- return -EFAULT;
-
- driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
- ret = -EPERM;
- goto out_unref;
- }
-
- ret = driver->verify_access(bo, filp);
- if (unlikely(ret != 0))
- goto out_unref;
-
- kmap_offset = dev_offset - bo->vm_node->start;
- if (unlikely(kmap_offset >= bo->num_pages)) {
- ret = -EFBIG;
- goto out_unref;
- }
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- ret = -EAGAIN;
- goto out_unref;
- default:
- goto out_unref;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- goto out_unref;
- }
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return -EFBIG;
-
- *f_pos += io_size;
-
- return io_size;
-out_unref:
- ttm_bo_unref(&bo);
- return ret;
-}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
- char __user *rbuf, size_t count, loff_t *f_pos,
- bool write)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- kmap_offset = (*f_pos >> PAGE_SHIFT);
- if (unlikely(kmap_offset >= bo->num_pages))
- return -EFBIG;
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- return -EAGAIN;
- default:
- return ret;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- return ret;
- }
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return ret;
-
- *f_pos += io_size;
-
- return io_size;
-}
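
The TTM hunks above do two things: they delete ttm_bo_io() and ttm_bo_fbdev_io(), which no driver still called, and they fold the old rbtree lookup plus kref dance in ttm_bo_mmap() into a single ttm_bo_vm_lookup() helper whose tail is visible at the top of this section. A plausible reconstruction of the whole helper, assuming the drm_vma_manager lookup entry points this series introduces (drm_vma_offset_lock_lookup() and drm_vma_offset_lookup_locked() are the assumed names):

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	/* find the node covering [offset, offset + pages) */
	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		/* the object may be dying; only reference it if still live */
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

The separate bdev->vm_lock read lock disappears because the vma manager carries its own lookup lock.
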
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74..7650dc0d78c 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
- .dumb_destroy = udl_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
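
The dumb_destroy hook is now the core's drm_gem_dumb_destroy. Judging by the identical per-driver copies deleted in this diff (udl_dumb_destroy just below, tegra_bo_dumb_destroy later), the shared helper presumably reduces to a handle delete:

int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle)
{
	/* dumb buffers are plain GEM objects; dropping the handle suffices */
	return drm_gem_handle_delete(file, handle);
}
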
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c7..56aec9409fa 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f..8dbe9d0ae9a 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
- int page_count, i;
- struct page *page;
- struct inode *inode;
- struct address_space *mapping;
+ struct page **pages;
if (obj->pages)
return 0;
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages != NULL);
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
- return -ENOMEM;
+ pages = drm_gem_get_pages(&obj->base, gfpmask);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- inode = file_inode(obj->base.filp);
- mapping = inode->i_mapping;
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(page))
- goto err_pages;
- obj->pages[i] = page;
- }
+ obj->pages = pages;
return 0;
-err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return PTR_ERR(page);
}
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
- int page_count = obj->base.size / PAGE_SIZE;
- int i;
-
- if (obj->base.import_attach) {
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return;
- }
-
- for (i = 0; i < page_count; i++)
- page_cache_release(obj->pages[i]);
-
- drm_free_large(obj->pages);
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->pages)
udl_gem_put_pages(obj);
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
}
/* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
goto out;
- if (!gobj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
drm_gem_object_unreference(&gobj->base);
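
With the shmem page-array boilerplate moved into the GEM core, a driver's page attach/detach pair shrinks to the shape udl adopts above. A hedged sketch of the same pattern for a hypothetical driver (mydrv_gem_object and its pages field are illustrative names; drm_gem_get_pages()/drm_gem_put_pages() are the helpers udl calls):

static int mydrv_gem_get_pages(struct mydrv_gem_object *obj, gfp_t gfpmask)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	/* core helper pins the backing shmem pages and builds the array */
	pages = drm_gem_get_pages(&obj->base, gfpmask);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;
	return 0;
}

static void mydrv_gem_put_pages(struct mydrv_gem_object *obj)
{
	/* not dirty, not accessed: matches udl's call above */
	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}
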
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d719525..f5ae57406f3 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%*ph\n",
- total_len, 11, desc);
+ DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+ total_len, desc);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
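
The DRM_INFO change above is purely a printk-format cleanup: the %ph hex-dump specifier accepts a fixed field width embedded in the format string, so the separate width argument of the %*ph form can be dropped. A minimal illustration (desc stands in for the vendor descriptor buffer):

	u8 desc[32];

	/* old form: width passed as an extra int argument */
	DRM_INFO("data:%*ph\n", 11, desc);

	/* new form: width embedded in the specifier, one argument fewer */
	DRM_INFO("data:%11ph\n", desc);
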
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a242..652f9b43ec9 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
return ret;
}
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index f4ae2032794..92684a9b7e3 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+ DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c3..a811ef2b505 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff0987..7e3ad87c366 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (mem->type == VIA_MEM_AGP)
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
if (retval)
goto fail_alloc;
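
drm_mm_insert_node() gains a search-flags parameter in this series; passing DRM_MM_SEARCH_DEFAULT, as via does above, keeps the old first-fit behaviour. A hedged sketch of the call shape (DRM_MM_SEARCH_BEST, a smallest-fit alternative, is assumed from the same allocator rework):

	/* first-fit search, equivalent to the pre-rework behaviour */
	retval = drm_mm_insert_node(&dev_priv->vram_mm, &item->mm_node,
				    tmpSize, 0 /* alignment */,
				    DRM_MM_SEARCH_DEFAULT);

	/* alternatively: prefer the smallest hole that still fits */
	retval = drm_mm_insert_node(&dev_priv->vram_mm, &item->mm_node,
				    tmpSize, 0, DRM_MM_SEARCH_BEST);
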
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48..1a90f0a2f7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
* Ioctl definitions.
*/
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->fman = vmw_fence_manager_init(dev_priv);
- if (unlikely(dev_priv->fman == NULL))
+ if (unlikely(dev_priv->fman == NULL)) {
+ ret = -ENOMEM;
goto out_no_fman;
+ }
vmw_kms_save_vga(dev_priv);
@@ -782,7 +784,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- struct drm_ioctl_desc *ioctl =
+ const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +797,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
return drm_ioctl(filp, cmd, arg);
}
-static int vmw_firstopen(struct drm_device *dev)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- dev_priv->is_opened = true;
-
- return 0;
-}
-
static void vmw_lastclose(struct drm_device *dev)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
- /**
- * Do nothing on the lastclose call from drm_unload.
- */
-
- if (!dev_priv->is_opened)
- return;
-
- dev_priv->is_opened = false;
set.x = 0;
set.y = 0;
set.fb = NULL;
@@ -1120,7 +1105,6 @@ static const struct file_operations vmwgfx_driver_fops = {
.mmap = vmw_mmap,
.poll = vmw_fops_poll,
.read = vmw_fops_read,
- .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -1132,7 +1116,6 @@ static struct drm_driver driver = {
DRIVER_MODESET,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
- .firstopen = vmw_firstopen,
.lastclose = vmw_lastclose,
.irq_preinstall = vmw_irq_preinstall,
.irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1126,6 @@ static struct drm_driver driver = {
.disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
- .dma_quiescent = NULL, /*vmw_dma_quiescent, */
.master_create = vmw_master_create,
.master_destroy = vmw_master_destroy,
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 13aeda71280..150ec64af61 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -324,7 +324,6 @@ struct vmw_private {
*/
bool stealth;
- bool is_opened;
bool enable_fb;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d4607b2530d..fc43c060123 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6fa89c9d621..8d038c36bd5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -123,7 +123,8 @@ struct vmw_display_unit {
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
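
The prototype change above (and the matching vmw_du_page_flip, tegra_dc_page_flip and ipu_page_flip hunks elsewhere in this diff) reflects the new .page_flip signature: the hook now receives the DRM_MODE_PAGE_FLIP_* flags that userspace passed to the ioctl. A hedged sketch of how a driver without async-flip support might handle the new argument (mydrv_page_flip and mydrv_queue_flip are hypothetical names; DRM_MODE_PAGE_FLIP_ASYNC is assumed from the same rework):

static int mydrv_page_flip(struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   struct drm_pending_vblank_event *event,
			   uint32_t page_flip_flags)
{
	/* reject flips we cannot honour instead of silently syncing */
	if (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
		return -EINVAL;

	return mydrv_queue_flip(crtc, fb, event);
}
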
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b6..0e67cf41065 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
goto out_no_dmabuf;
rep->handle = handle;
- rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
if (ret != 0)
return -EINVAL;
- *offset = out_buf->base.addr_space_offset;
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
vmw_dmabuf_unreference(&out_buf);
return 0;
}
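
Both vmwgfx hunks above are instances of the same conversion seen in udl and tegra: the user-visible mmap offset now comes from the drm_vma_offset_node embedded in every GEM object rather than from the old map_list hash key. A hedged sketch of the resulting pattern (mydrv_map_offset is a hypothetical wrapper):

static int mydrv_map_offset(struct drm_gem_object *gem, uint64_t *offset)
{
	int ret;

	/* idempotent with the vma manager, so callers no longer need the
	 * old "if (!gem->map_list.map)" guard */
	ret = drm_gem_create_mmap_offset(gem);
	if (ret)
		return ret;

	*offset = drm_vma_node_offset_addr(&gem->vma_node);
	return 0;
}
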
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 28e28a23d44..47163029987 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -43,7 +43,7 @@ void host1x_set_drm_data(struct device *dev, void *data)
void *host1x_get_drm_data(struct device *dev)
{
struct host1x *host1x = dev_get_drvdata(dev);
- return host1x->drm_data;
+ return host1x ? host1x->drm_data : NULL;
}
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 790ddf114e5..bed90a8131b 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -301,8 +301,8 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
-extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_gr2d_driver;
#endif
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 5360e5a57ec..b1a05ad901c 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -235,7 +235,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
}
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faac..8c61ceeaa12 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
bo = to_tegra_bo(gem);
- args->offset = tegra_bo_get_mmap_offset(bo);
+ args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
drm_gem_object_unreference(gem);
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
}
#endif
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = tegra_drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
.gem_vm_ops = &tegra_bo_vm_ops,
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = tegra_bo_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c..59623de4ee1 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
-{
- return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
-}
-
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
{
struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_bo *bo = to_tegra_bo(gem);
- if (gem->map_list.map)
- drm_gem_free_mmap_offset(gem);
+ drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
bo = to_tegra_bo(gem);
- *offset = tegra_bo_get_mmap_offset(bo);
+ *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
drm_gem_object_unreference(gem);
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb..492533a2dac 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
unsigned int size,
unsigned int *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index 01097da09f7..644d95c7d48 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -551,24 +551,8 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
return;
}
- memset(&frame, 0, sizeof(frame));
-
- frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
- frame.version = 0x01;
- frame.length = 6;
-
- frame.data[0] = 0x03; /* regid0 */
- frame.data[1] = 0x0c; /* regid1 */
- frame.data[2] = 0x00; /* regid2 */
- frame.data[3] = 0x02 << 5; /* video format */
-
- /* TODO: 74 MHz limit? */
- if (1) {
- frame.data[4] = 0x00 << 4; /* 3D structure */
- } else {
- frame.data[4] = 0x08 << 4; /* 3D structure */
- frame.data[5] = 0x00 << 4; /* 3D ext. data */
- }
+ hdmi_vendor_infoframe_init(&frame);
+ frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
@@ -904,6 +888,11 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_hdmi *hdmi = node->info_ent->data;
+ int err;
+
+ err = clk_enable(hdmi->clk);
+ if (err)
+ return err;
#define DUMP_REG(name) \
seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
@@ -1069,6 +1058,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
+ clk_disable(hdmi->clk);
+
return 0;
}
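
The show_regs change above fixes register dumps against a gated clock: reading the HDMI registers with the module clock disabled can hang or fault, so the debugfs handler now brackets the DUMP_REG block with clk_enable()/clk_disable(). A hedged sketch of the pattern for a hypothetical driver (mydrv, its regs mapping and clk field are illustrative):

static int mydrv_show_regs(struct seq_file *s, void *data)
{
	struct mydrv *priv = s->private;	/* hypothetical driver state */
	int err;

	/* registers are only accessible while the module clock runs */
	err = clk_enable(priv->clk);
	if (err)
		return err;

	seq_printf(s, "CTRL: %08x\n", readl(priv->regs + 0x00));

	clk_disable(priv->clk);
	return 0;
}
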
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f2026..5aa66ef7a94 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
@@ -147,6 +147,13 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
if (!rgb)
return -ENOMEM;
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+
+ err = tegra_output_parse_dt(&rgb->output);
+ if (err < 0)
+ return err;
+
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
@@ -165,13 +172,6 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return err;
}
- rgb->output.dev = dc->dev;
- rgb->output.of_node = np;
-
- err = tegra_output_parse_dt(&rgb->output);
- if (err < 0)
- return err;
-
dc->rgb = &rgb->output;
return 0;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index cc807667d8f..c4e1050f225 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -42,12 +42,12 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
/* Check that we're not going to overflow */
total = sizeof(struct host1x_job) +
- num_relocs * sizeof(struct host1x_reloc) +
- num_unpins * sizeof(struct host1x_job_unpin_data) +
- num_waitchks * sizeof(struct host1x_waitchk) +
- num_cmdbufs * sizeof(struct host1x_job_gather) +
- num_unpins * sizeof(dma_addr_t) +
- num_unpins * sizeof(u32 *);
+ (u64)num_relocs * sizeof(struct host1x_reloc) +
+ (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
+ (u64)num_waitchks * sizeof(struct host1x_waitchk) +
+ (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
+ (u64)num_unpins * sizeof(dma_addr_t) +
+ (u64)num_unpins * sizeof(u32 *);
if (total > ULONG_MAX)
return NULL;
@@ -466,9 +466,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped) {
- int err = PTR_ERR(job->gather_copy_mapped);
job->gather_copy_mapped = NULL;
- return err;
+ return -ENOMEM;
}
job->gather_copy_size = size;
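
The host1x_job_alloc() hunk above is integer-overflow hardening: on a 32-bit kernel each num_* product was computed in 32 bits, so a crafted count could wrap total back under ULONG_MAX and slip past the check; promoting every term to u64 makes the comparison meaningful. (The copy_gathers() fix is related plumbing: dma_alloc_writecombine() returns NULL on failure rather than an ERR_PTR, so taking PTR_ERR() of it yielded garbage, hence the hard-coded -ENOMEM.) A self-contained userspace illustration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num = 0x20000000;		/* attacker-chosen count */
	uint32_t wrapped = num * 16;		/* 32-bit math: wraps to 0 */
	uint64_t promoted = (uint64_t)num * 16;	/* 0x200000000, caught */

	printf("wrapped: %u, promoted: %llu\n",
	       wrapped, (unsigned long long)promoted);
	return 0;
}
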
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf787e1d932..ec0ae2d1686 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vga_switcheroo.h>
+#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
@@ -37,6 +38,7 @@ struct vga_switcheroo_client {
const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ bool driver_power_control;
struct list_head list;
};
@@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active)
+ int id, bool active, bool driver_power_control)
{
struct vga_switcheroo_client *client;
@@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev,
client->ops = ops;
client->id = id;
client->active = active;
+ client->driver_power_control = driver_power_control;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
@@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev,
}
int vga_switcheroo_register_client(struct pci_dev *pdev,
- const struct vga_switcheroo_client_ops *ops)
+ const struct vga_switcheroo_client_ops *ops,
+ bool driver_power_control)
{
return register_client(pdev, ops, -1,
- pdev == vga_default_device());
+ pdev == vga_default_device(), driver_power_control);
}
EXPORT_SYMBOL(vga_switcheroo_register_client);
@@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id, bool active)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
@@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
int i = 0;
mutex_lock(&vgasr_mutex);
list_for_each_entry(client, &vgasr_priv.clients, list) {
- seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
+ client->driver_power_control ? "Dyn" : "",
client->pwr_state ? "Pwr" : "Off",
pci_name(client->pdev));
i++;
@@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
static int vga_switchon(struct vga_switcheroo_client *client)
{
+ if (client->driver_power_control)
+ return 0;
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
@@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
+ if (client->driver_power_control)
+ return 0;
/* call the driver callback to turn off device */
client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
@@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
+ if (client->driver_power_control)
+ continue;
set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
@@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
+ if (client->driver_power_control)
+ continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
set_audio_state(client->id, VGA_SWITCHEROO_ON);
@@ -565,3 +578,127 @@ err:
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
+
+static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct vga_switcheroo_client *client;
+
+ if (!vgasr_priv.handler->power_state)
+ return;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return;
+
+ if (!client->driver_power_control)
+ return;
+
+ vgasr_priv.handler->power_state(client->id, state);
+}
+
+/* force a PCI device to a certain state - mainly to turn off audio clients */
+
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return;
+
+ if (!client->driver_power_control)
+ return;
+
+ client->pwr_state = dynamic;
+ set_audio_state(client->id, dynamic);
+}
+EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
+
+/* switcheroo power domain */
+static int vga_switcheroo_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = dev->bus->pm->runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+ return 0;
+}
+
+static int vga_switcheroo_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
+ ret = dev->bus->pm->runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* this version is for the case where the power switch is separate
+   from the device being powered down. */
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+{
+ /* copy over all the bus versions */
+ if (dev->bus && dev->bus->pm) {
+ domain->ops = *dev->bus->pm;
+ domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
+ domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
+
+ dev->pm_domain = domain;
+ return 0;
+ }
+ dev->pm_domain = NULL;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+
+static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ struct vga_switcheroo_client *client, *found = NULL;
+
+	/* check whether the video device must be switched back on
+	   so the audio device can resume */
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+ found = client;
+ ret = pm_runtime_get_sync(&client->pdev->dev);
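+			/* pm_runtime_get_sync() returns 1 if the device was already active */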
+ if (ret) {
+ if (ret != 1)
+ return ret;
+ }
+ break;
+ }
+ }
+ ret = dev->bus->pm->runtime_resume(dev);
+
+ /* put the reference for the gpu */
+ if (found) {
+ pm_runtime_mark_last_busy(&found->pdev->dev);
+ pm_runtime_put_autosuspend(&found->pdev->dev);
+ }
+ return ret;
+}
+
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+{
+ /* copy over all the bus versions */
+ if (dev->bus && dev->bus->pm) {
+ domain->ops = *dev->bus->pm;
+ domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+
+ dev->pm_domain = domain;
+ return 0;
+ }
+ dev->pm_domain = NULL;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
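
A hedged sketch of how a GPU driver might opt into the new dynamic power control: register with driver_power_control set so that manual debugfs switching skips the client, then wrap the PCI bus PM ops so runtime suspend/resume also drive the mux power state. All mydrv_* names and the static domain storage are illustrative assumptions, not part of this patch:

static struct dev_pm_domain mydrv_pm_domain;

static const struct vga_switcheroo_client_ops mydrv_switcheroo_ops = {
	.set_gpu_state	= mydrv_switcheroo_set_state,	/* hypothetical */
	.reprobe	= mydrv_switcheroo_reprobe,	/* hypothetical */
	.can_switch	= mydrv_switcheroo_can_switch,	/* hypothetical */
};

static int mydrv_register_switcheroo(struct pci_dev *pdev)
{
	int ret;

	ret = vga_switcheroo_register_client(pdev, &mydrv_switcheroo_ops,
					     true /* driver_power_control */);
	if (ret)
		return ret;

	/* runtime suspend/resume now also flip the mux power state */
	return vga_switcheroo_init_domain_pm_ops(&pdev->dev,
						 &mydrv_pm_domain);
}
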
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 878f1688210..47c5888461f 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -199,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_cma_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
};
@@ -775,7 +774,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
}
EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
@@ -788,7 +787,7 @@ static struct drm_driver imx_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index bae0e2e8191..6fd37a7453e 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
static int ipu_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
int ret;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 40178338b61..9e758a8f890 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -22,6 +22,7 @@
*/
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
@@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
- frame->length = 13;
+ frame->length = HDMI_AVI_INFOFRAME_SIZE;
return 0;
}
@@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@@ -95,13 +96,18 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
- if (frame->active_info_valid)
+ /*
+ * Data byte 1, bit 4 has to be set if we provide the active format
+ * aspect ratio
+ */
+ if (frame->active_aspect & 0xf)
ptr[0] |= BIT(4);
- if (frame->horizontal_bar_valid)
+	/* Bits 3 and 2 indicate whether we transmit horizontal/vertical bar data */
+ if (frame->top_bar || frame->bottom_bar)
ptr[0] |= BIT(3);
- if (frame->vertical_bar_valid)
+ if (frame->left_bar || frame->right_bar)
ptr[0] |= BIT(2);
ptr[1] = ((frame->colorimetry & 0x3) << 6) |
@@ -151,7 +157,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
- frame->length = 25;
+ frame->length = HDMI_SPD_INFOFRAME_SIZE;
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +191,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@@ -218,7 +224,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
- frame->length = 10;
+ frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
return 0;
}
@@ -250,7 +256,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
if (frame->channels >= 2)
channels = frame->channels - 1;
@@ -282,9 +288,33 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
- * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary
- * buffer
+ * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame->version = 1;
+
+ frame->oui = HDMI_IEEE_OUI;
+
+ /*
+ * 0 is a valid value for s3d_struct, so we use a special "not set"
+ * value
+ */
+ frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+
+/**
+ * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * @frame: HDMI vendor infoframe
* @buffer: destination buffer
* @size: size of buffer
*
@@ -297,27 +327,110 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
* error code on failure.
*/
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
- void *buffer, size_t size)
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ /* empty info frame */
+ if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ /* only one of those can be supplied */
+ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ /* for side by side (half) we also need to provide 3D_Ext_Data */
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ frame->length = 6;
+ else
+ frame->length = 5;
+
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
- memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
+ /* HDMI OUI */
+ ptr[4] = 0x03;
+ ptr[5] = 0x0c;
+ ptr[6] = 0x00;
+
+ if (frame->vic) {
+ ptr[7] = 0x1 << 5; /* video format */
+ ptr[8] = frame->vic;
+ } else {
+ ptr[7] = 0x2 << 5; /* video format */
+ ptr[8] = (frame->s3d_struct & 0xf) << 4;
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
+ }
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+}
+
+/**
+ * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
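
A hedged usage sketch of the reworked vendor-infoframe API, mirroring the tegra_hdmi conversion earlier in this diff. The buffer size is an assumption (4-byte header plus the 6-byte maximum payload):

static ssize_t pack_3d_infoframe(u8 *buffer, size_t size)
{
	struct hdmi_vendor_infoframe frame;

	hdmi_vendor_infoframe_init(&frame);
	frame.s3d_struct = HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
	frame.s3d_ext_data = 0;	/* 3D_Ext_Data, required for SbS (half) */

	/* fails with -EINVAL if both vic and s3d_struct are set (or
	 * neither), -ENOSPC if the buffer cannot hold header + payload */
	return hdmi_vendor_infoframe_pack(&frame, buffer, size);
}

	u8 buffer[10];
	ssize_t len = pack_3d_infoframe(buffer, sizeof(buffer));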