Diffstat (limited to 'drivers/gpu/drm/radeon')
70 files changed, 9175 insertions, 2388 deletions
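Most of the churn in this diff is drivers/gpu/drm/radeon/ObjectID.h, where every encoder, connector, router and generic enum ID is rebuilt from the same packed layout: an object type, an enum ID and an object ID OR-ed together through OBJECT_TYPE_SHIFT, ENUM_ID_SHIFT and OBJECT_ID_SHIFT. As orientation before reading those hunks, here is a minimal standalone sketch of that packing; the pack_object_id() helper and the 0x00FF/0x0700 field masks are inferred from the shifts and reserved-bit masks visible in the header and are illustrative only, not symbols introduced by the patch.

/*
 * Illustrative sketch only -- not part of the patch.  ObjectID.h builds every
 * encoder/connector/router/generic enum ID from the same packed layout:
 *
 *     object id   -> bits  7:0   (OBJECT_ID_SHIFT   0x00)
 *     enum id     -> bits 10:8   (ENUM_ID_SHIFT     0x08)
 *     object type -> bits 14:12  (OBJECT_TYPE_SHIFT 0x0C)
 *
 * The field masks below are inferred from those shifts and the reserved-bit
 * masks in the header (RESERVED1_ID_MASK 0x0800, RESERVED2_ID_MASK 0x8000);
 * pack_object_id() and the unpacking in main() are hypothetical helpers, not
 * symbols added by this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_SHIFT    0x00
#define ENUM_ID_SHIFT      0x08
#define OBJECT_TYPE_SHIFT  0x0C

#define OBJECT_ID_MASK     0x00FF
#define ENUM_ID_MASK       0x0700
#define OBJECT_TYPE_MASK   0x7000

static uint16_t pack_object_id(uint16_t type, uint16_t enum_id, uint16_t id)
{
	return (uint16_t)((type << OBJECT_TYPE_SHIFT) |
			  (enum_id << ENUM_ID_SHIFT) |
			  (id << OBJECT_ID_SHIFT));
}

int main(void)
{
	/*
	 * 0x2101 is the legacy value the header's own comment block lists for
	 * ENCODER_INTERNAL_LVDS_ENUM_ID1: object type 0x2 (encoder), enum id
	 * GRAPH_OBJECT_ENUM_ID1, object id ENCODER_OBJECT_ID_INTERNAL_LVDS (0x01).
	 */
	uint16_t id = pack_object_id(0x2, 0x1, 0x01);

	printf("0x%04X -> type %u, enum %u, object 0x%02X\n", id,
	       (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT,
	       (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT,
	       (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT);
	return 0;
}

The same composition appears throughout the new macros below (for example, CONNECTOR_eDP_ENUM_ID1 combines GRAPH_OBJECT_TYPE_CONNECTOR, GRAPH_OBJECT_ENUM_ID1 and the new CONNECTOR_OBJECT_ID_eDP), so the hunks that follow are largely mechanical reflows of that pattern plus the new eDP, MXM and generic-object IDs.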
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index b5713eedd6e..1cc7b937b1e 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable $(call if_changed,mkregtable) +$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable + $(call if_changed,mkregtable) + $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable $(call if_changed,mkregtable) @@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h $(obj)/r300.o: $(obj)/r300_reg_safe.h +$(obj)/r420.o: $(obj)/r420_reg_safe.h + $(obj)/rs600.o: $(obj)/rs600_reg_safe.h radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ @@ -49,7 +54,7 @@ radeon-y += radeon_device.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ - r600_blit_kms.o radeon_pm.o + r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index 6d0183c61d3..c714179d1bf 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h @@ -1,5 +1,5 @@ /* -* Copyright 2006-2007 Advanced Micro Devices, Inc. +* Copyright 2006-2007 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -41,14 +41,14 @@ /****************************************************/ /* Encoder Object ID Definition */ /****************************************************/ -#define ENCODER_OBJECT_ID_NONE 0x00 +#define ENCODER_OBJECT_ID_NONE 0x00 /* Radeon Class Display Hardware */ #define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01 #define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02 #define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03 #define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04 -#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ +#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ #define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06 #define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07 @@ -56,11 +56,11 @@ #define ENCODER_OBJECT_ID_SI170B 0x08 #define ENCODER_OBJECT_ID_CH7303 0x09 #define ENCODER_OBJECT_ID_CH7301 0x0A -#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ +#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D #define ENCODER_OBJECT_ID_TITFP513 0x0E -#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ +#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ #define ENCODER_OBJECT_ID_VT1623 0x10 #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 @@ -68,9 +68,9 @@ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15 -#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ -#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) 
*/ -#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ +#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ +#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */ +#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ #define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19 #define ENCODER_OBJECT_ID_VT1625 0x1A #define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B @@ -86,7 +86,7 @@ /****************************************************/ /* Connector Object ID Definition */ /****************************************************/ -#define CONNECTOR_OBJECT_ID_NONE 0x00 +#define CONNECTOR_OBJECT_ID_NONE 0x00 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01 #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03 @@ -96,7 +96,7 @@ #define CONNECTOR_OBJECT_ID_SVIDEO 0x07 #define CONNECTOR_OBJECT_ID_YPbPr 0x08 #define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09 -#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ +#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ #define CONNECTOR_OBJECT_ID_SCART 0x0B #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D @@ -106,6 +106,8 @@ #define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11 #define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12 #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 +#define CONNECTOR_OBJECT_ID_eDP 0x14 +#define CONNECTOR_OBJECT_ID_MXM 0x15 /* deleted */ @@ -116,6 +118,14 @@ #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01 /****************************************************/ +/* Generic Object ID Definition */ +/****************************************************/ +#define GENERIC_OBJECT_ID_NONE 0x00 +#define GENERIC_OBJECT_ID_GLSYNC 0x01 +#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 +#define GENERIC_OBJECT_ID_MXM_OPM 0x03 + +/****************************************************/ /* Graphics Object ENUM ID Definition */ /****************************************************/ #define GRAPH_OBJECT_ENUM_ID1 0x01 @@ -124,6 +134,7 @@ #define GRAPH_OBJECT_ENUM_ID4 0x04 #define GRAPH_OBJECT_ENUM_ID5 0x05 #define GRAPH_OBJECT_ENUM_ID6 0x06 +#define GRAPH_OBJECT_ENUM_ID7 0x07 /****************************************************/ /* Graphics Object ID Bit definition */ @@ -133,35 +144,35 @@ #define RESERVED1_ID_MASK 0x0800 #define OBJECT_TYPE_MASK 0x7000 #define RESERVED2_ID_MASK 0x8000 - + #define OBJECT_ID_SHIFT 0x00 #define ENUM_ID_SHIFT 0x08 #define OBJECT_TYPE_SHIFT 0x0C + /****************************************************/ /* Graphics Object family definition */ /****************************************************/ -#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \ - (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ - GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT) +#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ + GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT) /****************************************************/ /* GPU Object ID definition - Shared with BIOS */ /****************************************************/ -#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) +#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) /****************************************************/ /* Encoder Object ID definition - Shared with BIOS */ /****************************************************/ /* -#define 
ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 +#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 #define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102 #define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103 #define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104 #define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105 #define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106 #define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107 -#define ENCODER_SIL170B_ENUM_ID1 0x2108 +#define ENCODER_SIL170B_ENUM_ID1 0x2108 #define ENCODER_CH7303_ENUM_ID1 0x2109 #define ENCODER_CH7301_ENUM_ID1 0x210A #define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B @@ -175,8 +186,8 @@ #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113 #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114 #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115 -#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 -#define ENCODER_SI178_ENUM_ID1 0x2117 +#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 +#define ENCODER_SI178_ENUM_ID1 0x2117 #define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118 #define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119 #define ENCODER_VT1625_ENUM_ID1 0x211A @@ -185,205 +196,169 @@ #define ENCODER_DP_DP501_ENUM_ID1 0x211D #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E */ -#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) - -#define ENCODER_SIL170B_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) - -#define ENCODER_CH7303_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) - -#define ENCODER_CH7301_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) - -#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \ - 
(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) - -#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) - -#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) - -#define ENCODER_TITFP513_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) - -#define ENCODER_VT1623_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) - -#define ENCODER_HDMI_SI1930_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) - -#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */ - -#define ENCODER_SI178_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) - -#define ENCODER_MVPU_FPGA_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_DDI_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) - -#define ENCODER_VT1625_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) - -#define ENCODER_HDMI_SI1932_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) - -#define ENCODER_DP_DP501_ENUM_ID1 \ - 
(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) - -#define ENCODER_DP_AN9801_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) - -#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) - -#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) +#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) + +#define ENCODER_SIL170B_ENUM_ID1 ( 
GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) + +#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) + +#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) + +#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) + +#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) + + +#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) + + +#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) + +#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) + +#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) + +#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) + + +#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) + + +#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT + +#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) + +#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + 
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) + +#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) + +#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) + +#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) + +#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) + +#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) + +#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) /****************************************************/ /* Connector Object ID definition - Shared with BIOS */ @@ -406,167 +381,253 @@ #define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 */ -#define CONNECTOR_LVDS_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) - -#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) - -#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) - -#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - 
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) - -#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) - -#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) - -#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) - -#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) - -#define CONNECTOR_VGA_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) - -#define CONNECTOR_VGA_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) - -#define CONNECTOR_COMPOSITE_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) - -#define CONNECTOR_SVIDEO_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) - -#define CONNECTOR_YPbPr_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) - -#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) - -#define CONNECTOR_9PIN_DIN_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) - -#define CONNECTOR_SCART_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) - -#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) - -#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) - -#define CONNECTOR_7PIN_DIN_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) - -#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) - -#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) - -#define CONNECTOR_CROSSFIRE_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << 
ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) - -#define CONNECTOR_CROSSFIRE_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) - -#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) - -#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) - -#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) - -#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) - -#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) - -#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \ - (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ - CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) +#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) + +#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) + +#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT) + +#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) + +#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) + +#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + 
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) + +#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) + +#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) + +#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) + +#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) + +#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) + +#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) + +#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) + +#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) + +#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) + +#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) + +#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) + +#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) + +#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) + +#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) + +#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) + +#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + 
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) + +#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) + +#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) + +#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) +#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) + +#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) + +#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) + +#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) + +#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) + + +#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) + +#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) + +#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A + +#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM 
<< OBJECT_ID_SHIFT) //Mapping to MXM_DP_B + +#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C + +#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D + +#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx + +#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx + +#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC /****************************************************/ /* Router Object ID definition - Shared with BIOS */ /****************************************************/ -#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \ - (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ - GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ - ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT) +#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT) /* deleted */ /****************************************************/ +/* Generic Object ID definition - Shared with BIOS */ +/****************************************************/ +#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT) + +#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT) + +#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT) + +#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) + +/****************************************************/ /* Object Cap definition - Shared with BIOS */ /****************************************************/ #define GRAPHICS_OBJECT_CAP_I2C 0x00000001L #define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L + #define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 #define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 #define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 @@ -575,4 +636,8 @@ #pragma pack() #endif -#endif /*GRAPHICTYPE */ +#endif /*GRAPHICTYPE */ + + + + diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d67c42555ab..e3b44562d26 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -58,6 +58,7 @@ typedef struct { } atom_exec_context; int atom_debug = 0; +static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); static uint32_t atom_arg_mask[8] = @@ 
-245,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_WS_ATTRIBUTES: val = gctx->io_attr; break; + case ATOM_WS_REGPTR: + val = gctx->reg_block; + break; default: val = ctx->ws[idx]; } @@ -263,10 +267,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; + val = gctx->scratch[((gctx->fb_base + idx) / 4)]; if (print) DEBUG("FB[0x%02X]", idx); - printk(KERN_INFO "FB access is not implemented.\n"); - return 0; + break; case ATOM_ARG_IMM: switch (align) { case ATOM_SRC_DWORD: @@ -384,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) return atom_get_src_int(ctx, attr, ptr, NULL, 1); } +static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) +{ + uint32_t val = 0xCDCDCDCD; + + switch (align) { + case ATOM_SRC_DWORD: + val = U32(*ptr); + (*ptr) += 4; + break; + case ATOM_SRC_WORD0: + case ATOM_SRC_WORD8: + case ATOM_SRC_WORD16: + val = U16(*ptr); + (*ptr) += 2; + break; + case ATOM_SRC_BYTE0: + case ATOM_SRC_BYTE8: + case ATOM_SRC_BYTE16: + case ATOM_SRC_BYTE24: + val = U8(*ptr); + (*ptr)++; + break; + } + return val; +} + static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t *saved, int print) { @@ -481,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, case ATOM_WS_ATTRIBUTES: gctx->io_attr = val; break; + case ATOM_WS_REGPTR: + gctx->reg_block = val; + break; default: ctx->ws[idx] = val; } @@ -488,9 +521,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; + gctx->scratch[((gctx->fb_base + idx) / 4)] = val; DEBUG("FB[0x%02X]", idx); - printk(KERN_INFO "FB access is not implemented.\n"); - return; + break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; @@ -573,7 +606,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) else SDEBUG(" table: %d\n", idx); if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) - atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift); + atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); } static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) @@ -676,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src1: "); - src1 = atom_get_src(ctx, attr, ptr); + src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); SDEBUG(" src2: "); src2 = atom_get_src(ctx, attr, ptr); dst &= src1; @@ -808,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); } +static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) +{ + uint8_t attr = U8((*ptr)++), shift; + uint32_t saved, dst; + int dptr = *ptr; + attr &= 0x38; + attr |= atom_def_dst[attr >> 3] << 6; + SDEBUG(" dst: "); + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); + SDEBUG(" shift: %d\n", shift); + dst <<= shift; + SDEBUG(" dst: "); + atom_put_dst(ctx, arg, attr, &dptr, dst, saved); +} + +static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) +{ + uint8_t attr = U8((*ptr)++), shift; + uint32_t saved, dst; + int dptr = *ptr; + attr &= 0x38; + attr |= atom_def_dst[attr >> 3] << 6; + SDEBUG(" dst: "); + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + shift = atom_get_src_direct(ctx, 
ATOM_SRC_BYTE0, ptr); + SDEBUG(" shift: %d\n", shift); + dst >>= shift; + SDEBUG(" dst: "); + atom_put_dst(ctx, arg, attr, &dptr, dst, saved); +} + static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; @@ -817,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); - shift = U8((*ptr)++); + shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; SDEBUG(" dst: "); @@ -833,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); - shift = U8((*ptr)++); + shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst >>= shift; SDEBUG(" dst: "); @@ -936,18 +1001,18 @@ static struct { atom_op_or, ATOM_ARG_FB}, { atom_op_or, ATOM_ARG_PLL}, { atom_op_or, ATOM_ARG_MC}, { - atom_op_shl, ATOM_ARG_REG}, { - atom_op_shl, ATOM_ARG_PS}, { - atom_op_shl, ATOM_ARG_WS}, { - atom_op_shl, ATOM_ARG_FB}, { - atom_op_shl, ATOM_ARG_PLL}, { - atom_op_shl, ATOM_ARG_MC}, { - atom_op_shr, ATOM_ARG_REG}, { - atom_op_shr, ATOM_ARG_PS}, { - atom_op_shr, ATOM_ARG_WS}, { - atom_op_shr, ATOM_ARG_FB}, { - atom_op_shr, ATOM_ARG_PLL}, { - atom_op_shr, ATOM_ARG_MC}, { + atom_op_shift_left, ATOM_ARG_REG}, { + atom_op_shift_left, ATOM_ARG_PS}, { + atom_op_shift_left, ATOM_ARG_WS}, { + atom_op_shift_left, ATOM_ARG_FB}, { + atom_op_shift_left, ATOM_ARG_PLL}, { + atom_op_shift_left, ATOM_ARG_MC}, { + atom_op_shift_right, ATOM_ARG_REG}, { + atom_op_shift_right, ATOM_ARG_PS}, { + atom_op_shift_right, ATOM_ARG_WS}, { + atom_op_shift_right, ATOM_ARG_FB}, { + atom_op_shift_right, ATOM_ARG_PLL}, { + atom_op_shift_right, ATOM_ARG_MC}, { atom_op_mul, ATOM_ARG_REG}, { atom_op_mul, ATOM_ARG_PS}, { atom_op_mul, ATOM_ARG_WS}, { @@ -1040,7 +1105,7 @@ static struct { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; -void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; @@ -1057,8 +1122,6 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); - /* reset reg block */ - ctx->reg_block = 0; ectx.ctx = ctx; ectx.ps_shift = ps / 4; ectx.start = base; @@ -1092,6 +1155,19 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) kfree(ectx.ws); } +void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +{ + mutex_lock(&ctx->mutex); + /* reset reg block */ + ctx->reg_block = 0; + /* reset fb window */ + ctx->fb_base = 0; + /* reset io mode */ + ctx->io_mode = ATOM_IO_MM; + atom_execute_table_locked(ctx, index, params); + mutex_unlock(&ctx->mutex); +} + static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; static void atom_index_iio(struct atom_context *ctx, int base) @@ -1214,3 +1290,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, *crev = CU8(idx + 3); return; } + +int atom_allocate_fb_scratch(struct atom_context *ctx) +{ + int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); + uint16_t data_offset; + int usage_bytes; + struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; + + atom_parse_data_header(ctx, index, NULL, 
NULL, NULL, &data_offset); + + firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); + + DRM_DEBUG("atom firmware requested %08x %dkb\n", + firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, + firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); + + usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + if (usage_bytes == 0) + usage_bytes = 20 * 1024; + /* allocate some scratch memory */ + ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); + if (!ctx->scratch) + return -ENOMEM; + return 0; +} diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index e6eb38f2bca..bc73781423a 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -91,6 +91,7 @@ #define ATOM_WS_AND_MASK 0x45 #define ATOM_WS_FB_WINDOW 0x46 #define ATOM_WS_ATTRIBUTES 0x47 +#define ATOM_WS_REGPTR 0x48 #define ATOM_IIO_NOP 0 #define ATOM_IIO_START 1 @@ -120,6 +121,7 @@ struct card_info { struct atom_context { struct card_info *card; + struct mutex mutex; void *bios; uint32_t cmd_table, data_table; uint16_t *iio; @@ -132,6 +134,7 @@ struct atom_context { uint8_t shift; int cs_equal, cs_above; int io_mode; + uint32_t *scratch; }; extern int atom_debug; @@ -142,6 +145,7 @@ int atom_asic_init(struct atom_context *); void atom_destroy(struct atom_context *); void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); +int atom_allocate_fb_scratch(struct atom_context *ctx); #include "atom-types.h" #include "atombios.h" #include "ObjectID.h" diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 6643afc36ce..91ad0d1c1b1 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD { typedef struct _ATOM_HPD_INT_RECORD { ATOM_COMMON_RECORD_HEADER sheader; UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ - UCHAR ucPluggged_PinState; + UCHAR ucPlugged_PinState; } ATOM_HPD_INT_RECORD; typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { @@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 { ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; } ATOM_POWERPLAY_INFO_V3; +/* New PPlib */ +/**************************************************************************/ +typedef struct _ATOM_PPLIB_THERMALCONTROLLER + +{ + UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* + UCHAR ucI2cLine; // as interpreted by DAL I2C + UCHAR ucI2cAddress; + UCHAR ucFanParameters; // Fan Control Parameters. + UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. + UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. + UCHAR ucReserved; // ---- + UCHAR ucFlags; // to be defined +} ATOM_PPLIB_THERMALCONTROLLER; + +#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. 
+ +#define ATOM_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_LM64 5 +#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib +#define ATOM_PP_THERMALCONTROLLER_RV6xx 7 +#define ATOM_PP_THERMALCONTROLLER_RV770 8 +#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 + +typedef struct _ATOM_PPLIB_STATE +{ + UCHAR ucNonClockStateIndex; + UCHAR ucClockStateIndices[1]; // variable-sized +} ATOM_PPLIB_STATE; + +//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps +#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 +#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 +#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4 +#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8 +#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16 +#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32 +#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64 +#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128 +#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256 +#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 +#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 +#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + + UCHAR ucDataRevision; + + UCHAR ucNumStates; + UCHAR ucStateEntrySize; + UCHAR ucClockInfoSize; + UCHAR ucNonClockSize; + + // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures + USHORT usStateArrayOffset; + + // offset from start of this table to array of ASIC-specific structures, + // currently ATOM_PPLIB_CLOCK_INFO. + USHORT usClockInfoArrayOffset; + + // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO + USHORT usNonClockInfoArrayOffset; + + USHORT usBackbiasTime; // in microseconds + USHORT usVoltageTime; // in microseconds + USHORT usTableSize; //the size of this structure, or the extended structure + + ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_* + + ATOM_PPLIB_THERMALCONTROLLER sThermalController; + + USHORT usBootClockInfoOffset; + USHORT usBootNonClockInfoOffset; + +} ATOM_PPLIB_POWERPLAYTABLE; + +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +// 2, 4, 6, 7 are reserved + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 +#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 +#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 +#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 +// remaining 3 bits are reserved + +//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings +#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 +#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 + +// 0 is 2.5Gb/s, 1 is 5Gb/s +#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 +#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 + +// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec +#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 
0x000000F8 +#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 + +// lookup into reduced refresh-rate table +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 + +#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 +#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 +// 2-15 TBD as needed. + +#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 +#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 +#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 + +#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 + +// Contained in an array starting at the offset +// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. +// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex +typedef struct _ATOM_PPLIB_NONCLOCK_INFO +{ + USHORT usClassification; + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + ULONG ulCapsAndSettings; + UCHAR ucRequiredPower; + UCHAR ucUnused1[3]; +} ATOM_PPLIB_NONCLOCK_INFO; + +// Contained in an array starting at the offset +// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. +// referenced from ATOM_PPLIB_STATE::ucClockStateIndices +typedef struct _ATOM_PPLIB_R600_CLOCK_INFO +{ + USHORT usEngineClockLow; + UCHAR ucEngineClockHigh; + + USHORT usMemoryClockLow; + UCHAR ucMemoryClockHigh; + + USHORT usVDDC; + USHORT usUnused1; + USHORT usUnused2; + + ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* + +} ATOM_PPLIB_R600_CLOCK_INFO; + +// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO +#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1 +#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2 +#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 +#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 +#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 + +typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO + +{ + USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600). + UCHAR ucLowEngineClockHigh; + USHORT usHighEngineClockLow; // High Engine clock in MHz. + UCHAR ucHighEngineClockHigh; + USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants. + UCHAR ucMemoryClockHigh; // Currentyl unused. + UCHAR ucPadding; // For proper alignment and size. + USHORT usVDDC; // For the 780, use: None, Low, High, Variable + UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} + UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. + USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). + ULONG ulFlags; +} ATOM_PPLIB_RS780_CLOCK_INFO; + +#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 +#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 +#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 +#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 + +#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. +#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 +#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 + +#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 +#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 +#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 + /**************************************************************************/ /* Following definitions are for compatiblity issue in different SW components. 
*/ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c15287a590f..af464e351fb 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); switch (mode) { case DRM_MODE_DPMS_ON: @@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, 1); atombios_blank_crtc(crtc, 0); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); atombios_blank_crtc(crtc, 1); if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, 0); atombios_enable_crtc(crtc, 0); break; } - - if (mode != DRM_MODE_DPMS_OFF) { - radeon_crtc_load_lut(crtc); - } } static void @@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; - printk("executing set crtc dtd timing\n"); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = radeon_crtc->crtc_id; - printk("executing set crtc timing\n"); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -409,60 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) } } -void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +union adjust_pixel_clock { + ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; +}; + +static u32 atombios_adjust_pll(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct radeon_pll *pll) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder = NULL; struct radeon_encoder *radeon_encoder = NULL; - uint8_t frev, crev; - int index; - SET_PIXEL_CLOCK_PS_ALLOCATION args; - PIXEL_CLOCK_PARAMETERS *spc1_ptr; - PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; - PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; - uint32_t pll_clock = mode->clock; - uint32_t adjusted_clock; - uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; - struct radeon_pll *pll; - int pll_flags = 0; + u32 adjusted_clock = mode->clock; - memset(&args, 0, sizeof(args)); + /* reset the pll flags */ + pll->flags = 0; if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | - RADEON_PLL_PREFER_CLOSEST_LOWER); + pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | + RADEON_PLL_PREFER_CLOSEST_LOWER); if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ - pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else - pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; } else { - pll_flags |= RADEON_PLL_LEGACY; + pll->flags |= RADEON_PLL_LEGACY; if (mode->clock > 200000) /* range limits??? 
*/ - pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else - pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { - if (!ASIC_IS_AVIVO(rdev)) { - if (encoder->encoder_type != - DRM_MODE_ENCODER_DAC) - pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; - if (!ASIC_IS_AVIVO(rdev) - && (encoder->encoder_type == - DRM_MODE_ENCODER_LVDS)) - pll_flags |= RADEON_PLL_USE_REF_DIV; - } radeon_encoder = to_radeon_encoder(encoder); + if (ASIC_IS_AVIVO(rdev)) { + /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) + adjusted_clock = mode->clock * 2; + } else { + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; + if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) + pll->flags |= RADEON_PLL_USE_REF_DIV; + } break; } } @@ -472,36 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) * special hw requirements. */ if (ASIC_IS_DCE3(rdev)) { - ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; + union adjust_pixel_clock args; + struct radeon_encoder_atom_dig *dig; + u8 frev, crev; + int index; - if (!encoder) - return; - - memset(&adjust_pll_args, 0, sizeof(adjust_pll_args)); - adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10); - adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id; - adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder); + if (!radeon_encoder->enc_priv) + return adjusted_clock; + dig = radeon_encoder->enc_priv; index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); - atom_execute_table(rdev->mode_info.atom_context, - index, (uint32_t *)&adjust_pll_args); - adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; - } else { - /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ - if (ASIC_IS_AVIVO(rdev) && - (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) - adjusted_clock = mode->clock * 2; - else - adjusted_clock = mode->clock; + atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev); + + memset(&args, 0, sizeof(args)); + + switch (frev) { + case 1: + switch (crev) { + case 1: + case 2: + args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v1.ucTransmitterID = radeon_encoder->encoder_id; + args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); + + atom_execute_table(rdev->mode_info.atom_context, + index, (uint32_t *)&args); + adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return adjusted_clock; + } + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return adjusted_clock; + } } + return adjusted_clock; +} + +union set_pixel_clock { + SET_PIXEL_CLOCK_PS_ALLOCATION base; + PIXEL_CLOCK_PARAMETERS v1; + PIXEL_CLOCK_PARAMETERS_V2 v2; + PIXEL_CLOCK_PARAMETERS_V3 v3; +}; + +void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder = NULL; + struct radeon_encoder *radeon_encoder = NULL; + u8 frev, crev; + int index; + union set_pixel_clock args; + u32 pll_clock = mode->clock; + u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; + 
struct radeon_pll *pll; + u32 adjusted_clock; + + memset(&args, 0, sizeof(args)); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); + break; + } + } + + if (!radeon_encoder) + return; if (radeon_crtc->crtc_id == 0) pll = &rdev->clock.p1pll; else pll = &rdev->clock.p2pll; - radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div, pll_flags); + /* adjust pixel clock as needed */ + adjusted_clock = atombios_adjust_pll(crtc, mode, pll); + + if (ASIC_IS_AVIVO(rdev)) { + if (radeon_new_pll) + radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, + &fb_div, &frac_fb_div, + &ref_div, &post_div); + else + radeon_compute_pll(pll, adjusted_clock, &pll_clock, + &fb_div, &frac_fb_div, + &ref_div, &post_div); + } else + radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, @@ -511,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) case 1: switch (crev) { case 1: - spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; - spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); - spc1_ptr->usRefDiv = cpu_to_le16(ref_div); - spc1_ptr->usFbDiv = cpu_to_le16(fb_div); - spc1_ptr->ucFracFbDiv = frac_fb_div; - spc1_ptr->ucPostDiv = post_div; - spc1_ptr->ucPpll = + args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v1.usRefDiv = cpu_to_le16(ref_div); + args.v1.usFbDiv = cpu_to_le16(fb_div); + args.v1.ucFracFbDiv = frac_fb_div; + args.v1.ucPostDiv = post_div; + args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - spc1_ptr->ucCRTC = radeon_crtc->crtc_id; - spc1_ptr->ucRefDivSrc = 1; + args.v1.ucCRTC = radeon_crtc->crtc_id; + args.v1.ucRefDivSrc = 1; break; case 2: - spc2_ptr = - (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; - spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); - spc2_ptr->usRefDiv = cpu_to_le16(ref_div); - spc2_ptr->usFbDiv = cpu_to_le16(fb_div); - spc2_ptr->ucFracFbDiv = frac_fb_div; - spc2_ptr->ucPostDiv = post_div; - spc2_ptr->ucPpll = + args.v2.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v2.usRefDiv = cpu_to_le16(ref_div); + args.v2.usFbDiv = cpu_to_le16(fb_div); + args.v2.ucFracFbDiv = frac_fb_div; + args.v2.ucPostDiv = post_div; + args.v2.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - spc2_ptr->ucCRTC = radeon_crtc->crtc_id; - spc2_ptr->ucRefDivSrc = 1; + args.v2.ucCRTC = radeon_crtc->crtc_id; + args.v2.ucRefDivSrc = 1; break; case 3: - if (!encoder) - return; - spc3_ptr = - (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; - spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); - spc3_ptr->usRefDiv = cpu_to_le16(ref_div); - spc3_ptr->usFbDiv = cpu_to_le16(fb_div); - spc3_ptr->ucFracFbDiv = frac_fb_div; - spc3_ptr->ucPostDiv = post_div; - spc3_ptr->ucPpll = + args.v3.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v3.usRefDiv = cpu_to_le16(ref_div); + args.v3.usFbDiv = cpu_to_le16(fb_div); + args.v3.ucFracFbDiv = frac_fb_div; + args.v3.ucPostDiv = post_div; + args.v3.ucPpll = radeon_crtc->crtc_id ? 
ATOM_PPLL2 : ATOM_PPLL1; - spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); - spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; - spc3_ptr->ucEncoderMode = + args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2); + args.v3.ucTransmitterId = radeon_encoder->encoder_id; + args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); break; default: @@ -562,33 +615,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) return; } - printk("executing set pll\n"); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } -int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_framebuffer *radeon_fb; struct drm_gem_object *obj; - struct drm_radeon_gem_object *obj_priv; + struct radeon_bo *rbo; uint64_t fb_location; uint32_t fb_format, fb_pitch_pixels, tiling_flags; + int r; - if (!crtc->fb) - return -EINVAL; + /* no fb bound */ + if (!crtc->fb) { + DRM_DEBUG("No FB bound\n"); + return 0; + } radeon_fb = to_radeon_framebuffer(crtc->fb); + /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; - obj_priv = obj->driver_private; - - if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { + rbo = obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); return -EINVAL; } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); + radeon_bo_unreserve(rbo); switch (crtc->fb->bits_per_pixel) { case 8: @@ -618,8 +681,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - radeon_object_get_tiling_flags(obj->driver_private, - &tiling_flags, NULL); if (tiling_flags & RADEON_TILING_MACRO) fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; @@ -674,7 +735,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb && old_fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(old_fb); - radeon_gem_object_unpin(radeon_fb->obj); + rbo = radeon_fb->obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + radeon_bo_unpin(rbo); + radeon_bo_unreserve(rbo); } /* Bytes per pixel may have changed */ @@ -683,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + + if (ASIC_IS_AVIVO(rdev)) + return avivo_crtc_set_base(crtc, x, y, old_fb); + else + return radeon_crtc_set_base(crtc, x, y, old_fb); +} + +/* properly set additional regs when using atombios */ +static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + u32 disp_merge_cntl; + + switch (radeon_crtc->crtc_id) { + case 0: + disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); + disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; + WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); + break; + case 1: + disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); + disp_merge_cntl &= 
~RADEON_DISP2_RGB_OFFSET_EN; + WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); + WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); + WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); + break; + } +} + int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -704,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, else { if (radeon_crtc->crtc_id == 0) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); - radeon_crtc_set_base(crtc, x, y, old_fb); - radeon_legacy_atom_set_surface(crtc); + atombios_crtc_set_base(crtc, x, y, old_fb); + radeon_legacy_atom_fixup(crtc); } atombios_overscan_setup(crtc, mode, adjusted_mode); atombios_scaler_setup(crtc); @@ -723,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { - atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); atombios_lock_crtc(crtc, 1); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c new file mode 100644 index 00000000000..71060114d5d --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -0,0 +1,785 @@ +/* + * Copyright 2007-8 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_LINK_STATUS_SIZE 6
+#define DP_DPCD_SIZE 8
+
+static char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+static const int dp_clocks[] = {
+	54000,  /* 1 lane, 1.62 GHz */
+	90000,  /* 1 lane, 2.70 GHz */
+	108000, /* 2 lane, 1.62 GHz */
+	180000, /* 2 lane, 2.70 GHz */
+	216000, /* 4 lane, 1.62 GHz */
+	360000, /* 4 lane, 2.70 GHz */
+};
+
+static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
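Each dp_clocks[] entry is the highest pixel clock the lane count and link rate named in its comment can carry for an uncompressed 24 bpp mode: with 8b/10b coding a lane delivers one data byte per link-clock cycle, and a 24 bpp pixel needs three bytes. A stand-alone sketch of that arithmetic (illustrative only, not part of the patch; the helper name is invented, and 24 bpp is the assumption baked into this table):

/* Illustrative only, not part of the patch: re-derive the dp_clocks[]
 * entries above. link_clock_khz is the per-lane link clock (162000 or
 * 270000 kHz); 8b/10b coding means one data byte per lane per cycle,
 * and an uncompressed 24 bpp pixel is three bytes.
 */
#include <stdio.h>

static int dp_max_mode_clock_khz(int lanes, int link_clock_khz)
{
	return (lanes * link_clock_khz) / 3;	/* 3 bytes per 24 bpp pixel */
}

int main(void)
{
	static const int lanes[] = { 1, 1, 2, 2, 4, 4 };
	static const int link_clock[] = { 162000, 270000, 162000, 270000,
					  162000, 270000 };
	int i;

	/* prints 54000, 90000, 108000, 180000, 216000, 360000 in order */
	for (i = 0; i < 6; i++)
		printf("%d lane(s) @ %d kHz -> %d kHz max mode clock\n",
		       lanes[i], link_clock[i],
		       dp_max_mode_clock_khz(lanes[i], link_clock[i]));
	return 0;
}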
+
+/* common helper functions */
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return 162000;
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return (i % 2) ? 270000 : 162000;
+		}
+	}
+
+	return 0;
+}
+
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+	int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+	if ((lanes == 0) || (bw == 0))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+				 int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					int lane)
+
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + +/* XXX fix me -- chip specific */ +#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 +static u8 dp_pre_emphasis_max(u8 voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } +} + +static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count, + u8 train_set[4]) +{ + u8 v = 0; + u8 p = 0; + int lane; + + for (lane = 0; lane < lane_count; lane++) { + u8 this_v = dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); + + DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n", + lane, + voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], + pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + if (v >= DP_VOLTAGE_MAX) + v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + + if (p >= dp_pre_emphasis_max(v)) + p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n", + voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], + pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); + + for (lane = 0; lane < 4; lane++) + train_set[lane] = v | p; +} + + +/* radeon aux chan functions */ +bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, + int num_bytes, u8 *read_byte, + u8 read_buf_len, u8 delay) +{ + struct drm_device *dev = chan->dev; + struct radeon_device *rdev = dev->dev_private; + PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); + unsigned char *base; + + memset(&args, 0, sizeof(args)); + + base = (unsigned char *)rdev->mode_info.atom_context->scratch; + + memcpy(base, req_bytes, num_bytes); + + args.lpAuxRequest = 0; + args.lpDataOut = 16; + args.ucDataOutLen = 0; + args.ucChannelID = chan->rec.i2c_id; + args.ucDelay = delay / 10; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + if (args.ucReplyStatus) { + DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", + req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], + chan->rec.i2c_id, args.ucReplyStatus); + return false; + } + + if (args.ucDataOutLen && read_byte && read_buf_len) { + if (read_buf_len < args.ucDataOutLen) { + DRM_ERROR("Buffer to small for return answer %d %d\n", + read_buf_len, args.ucDataOutLen); + return false; + } + { + int len = min(read_buf_len, args.ucDataOutLen); + memcpy(read_byte, base + 16, len); + } + } + return true; +} + +bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, + uint8_t send_bytes, uint8_t *send) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + u8 msg[20]; + u8 msg_len, dp_msg_len; + bool ret; + + dp_msg_len = 4; + msg[0] = address; + msg[1] = address >> 8; + msg[2] = AUX_NATIVE_WRITE << 4; + dp_msg_len += send_bytes; + msg[3] = (dp_msg_len << 4) | (send_bytes - 1); + + if (send_bytes 
> 16) + return false; + + memcpy(&msg[4], send, send_bytes); + msg_len = 4 + send_bytes; + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); + return ret; +} + +bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, + uint8_t delay, uint8_t expected_bytes, + uint8_t *read_p) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + u8 msg[20]; + u8 msg_len, dp_msg_len; + bool ret = false; + msg_len = 4; + dp_msg_len = 4; + msg[0] = address; + msg[1] = address >> 8; + msg[2] = AUX_NATIVE_READ << 4; + msg[3] = (dp_msg_len) << 4; + msg[3] |= expected_bytes - 1; + + ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); + return ret; +} + +/* radeon dp functions */ +static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, + uint8_t ucconfig, uint8_t lane_num) +{ + DP_ENCODER_SERVICE_PARAMETERS args; + int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); + + memset(&args, 0, sizeof(args)); + args.ucLinkClock = dp_clock / 10; + args.ucConfig = ucconfig; + args.ucAction = action; + args.ucLaneNum = lane_num; + args.ucStatus = 0; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + return args.ucStatus; +} + +u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + + return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, + dig_connector->dp_i2c_bus->rec.i2c_id, 0); +} + +bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + u8 msg[25]; + int ret; + + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); + if (ret) { + memcpy(dig_connector->dpcd, msg, 8); + { + int i; + DRM_DEBUG("DPCD: "); + for (i = 0; i < 8; i++) + DRM_DEBUG("%02x ", msg[i]); + DRM_DEBUG("\n"); + } + return true; + } + dig_connector->dpcd[0] = 0; + return false; +} + +void radeon_dp_set_link_config(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) + return; + + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->con_priv) + return; + dig_connector = radeon_connector->con_priv; + + dig_connector->dp_clock = + dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); + dig_connector->dp_lane_count = + dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); +} + +int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, + struct drm_display_mode *mode) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + + return dp_mode_valid(dig_connector->dpcd, mode->clock); +} + +static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + int ret; + ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, + DP_LINK_STATUS_SIZE, link_status); + if (!ret) { + DRM_ERROR("displayport link status failed\n"); + return false; + } + + DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n", + link_status[0], 
link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); + return true; +} + +bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!atom_dp_get_link_status(radeon_connector, link_status)) + return false; + if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) + return false; + return true; +} + +static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + + if (dig_connector->dpcd[0] >= 0x11) { + radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, + &power_state); + } +} + +static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) +{ + radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1, + &downspread); +} + +static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, + u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]) +{ + radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, + link_configuration); +} + +static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, + struct drm_encoder *encoder, + u8 train_set[4]) +{ + struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + int i; + + for (i = 0; i < dig_connector->dp_lane_count; i++) + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, + i, train_set[i]); + + radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, + dig_connector->dp_lane_count, train_set); +} + +static void dp_set_training(struct radeon_connector *radeon_connector, + u8 training) +{ + radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET, + 1, &training); +} + +void dp_link_train(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + int enc_id = 0; + bool clock_recovery, channel_eq; + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]; + u8 tries, voltage; + u8 train_set[4]; + int i; + + if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) + return; + + if (!radeon_encoder->enc_priv) + return; + dig = radeon_encoder->enc_priv; + + radeon_connector = to_radeon_connector(connector); + if (!radeon_connector->con_priv) + return; + dig_connector = radeon_connector->con_priv; + + if (dig->dig_encoder) + enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; + else + enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; + if (dig_connector->linkb) + enc_id |= ATOM_DP_CONFIG_LINK_B; + else + enc_id |= ATOM_DP_CONFIG_LINK_A; + + memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + if (dig_connector->dp_clock == 270000) + link_configuration[0] = DP_LINK_BW_2_7; + else + link_configuration[0] = DP_LINK_BW_1_62; + link_configuration[1] = dig_connector->dp_lane_count; + if (dig_connector->dpcd[0] >= 0x11) + link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + /* power up the sink */ + dp_set_power(radeon_connector, DP_SET_POWER_D0); + /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, 
DP_TRAINING_PATTERN_DISABLE); + /* set link bw and lanes on the sink */ + dp_set_link_bw_lanes(radeon_connector, link_configuration); + /* disable downspread on the sink */ + dp_set_downspread(radeon_connector, 0); + /* start training on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, + dig_connector->dp_clock, enc_id, 0); + /* set training pattern 1 on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 0); + + /* set initial vs/emph */ + memset(train_set, 0, 4); + udelay(400); + /* set training pattern 1 on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); + + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + + /* clock recovery loop */ + clock_recovery = false; + tries = 0; + voltage = 0xff; + for (;;) { + udelay(100); + if (!atom_dp_get_link_status(radeon_connector, link_status)) + break; + + if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { + clock_recovery = true; + break; + } + + for (i = 0; i < dig_connector->dp_lane_count; i++) { + if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + } + if (i == dig_connector->dp_lane_count) { + DRM_ERROR("clock recovery reached max voltage\n"); + break; + } + + if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) { + DRM_ERROR("clock recovery tried 5 times\n"); + break; + } + } else + tries = 0; + + voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new train_set as requested by sink */ + dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + } + if (!clock_recovery) + DRM_ERROR("clock recovery failed\n"); + else + DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n", + train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, + (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT); + + + /* set training pattern 2 on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); + /* set training pattern 2 on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 1); + + /* channel equalization loop */ + tries = 0; + channel_eq = false; + for (;;) { + udelay(400); + if (!atom_dp_get_link_status(radeon_connector, link_status)) + break; + + if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) { + DRM_ERROR("channel eq failed: 5 tries\n"); + break; + } + + /* Compute new train_set as requested by sink */ + dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); + dp_update_dpvs_emph(radeon_connector, encoder, train_set); + + tries++; + } + + if (!channel_eq) + DRM_ERROR("channel eq failed\n"); + else + DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n", + train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, + (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) + >> DP_TRAIN_PRE_EMPHASIS_SHIFT); + + /* disable the training pattern on the sink */ + dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, + dig_connector->dp_clock, enc_id, 0); +} + +int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; + int ret = 
0; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + int msg_len, dp_msg_len; + int reply_bytes; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[2] = AUX_I2C_READ << 4; + else + msg[2] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[2] |= AUX_I2C_MOT << 4; + + msg[0] = address; + msg[1] = address >> 8; + + reply_bytes = 1; + + msg_len = 4; + dp_msg_len = 3; + switch (mode) { + case MODE_I2C_WRITE: + msg[4] = write_byte; + msg_len++; + dp_msg_len += 2; + break; + case MODE_I2C_READ: + dp_msg_len += 1; + break; + default: + break; + } + + msg[3] = (dp_msg_len) << 4; + ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); + + if (ret) { + if (read_byte) + *read_byte = reply[0]; + return reply_bytes; + } + return -EREMOTEIO; +} + diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 0d79577c157..607241c6a8a 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename) fseek(file, 0, SEEK_SET); /* get header */ - if (fgets(buf, 1024, file) == NULL) + if (fgets(buf, 1024, file) == NULL) { + fclose(file); return -1; + } /* first line will contain the last register * and gpu name */ diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9e93eabcf1..11c9a3fe681 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -65,6 +65,96 @@ MODULE_FIRMWARE(FIRMWARE_R520); * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +/* hpd for digital panel detect/disconnect */ +bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) +{ + bool connected = false; + + switch (hpd) { + case RADEON_HPD_1: + if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) + connected = true; + break; + case RADEON_HPD_2: + if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) + connected = true; + break; + default: + break; + } + return connected; +} + +void r100_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd) +{ + u32 tmp; + bool connected = r100_hpd_sense(rdev, hpd); + + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(RADEON_FP_GEN_CNTL); + if (connected) + tmp &= ~RADEON_FP_DETECT_INT_POL; + else + tmp |= RADEON_FP_DETECT_INT_POL; + WREG32(RADEON_FP_GEN_CNTL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(RADEON_FP2_GEN_CNTL); + if (connected) + tmp &= ~RADEON_FP2_DETECT_INT_POL; + else + tmp |= RADEON_FP2_DETECT_INT_POL; + WREG32(RADEON_FP2_GEN_CNTL, tmp); + break; + default: + break; + } +} + +void r100_hpd_init(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + rdev->irq.hpd[1] = true; + break; + default: + break; + } + } + if (rdev->irq.installed) + r100_irq_set(rdev); +} + +void r100_hpd_fini(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: 
+ rdev->irq.hpd[1] = false; + break; + default: + break; + } + } +} + /* * PCI GART */ @@ -94,6 +184,15 @@ int r100_pci_gart_init(struct radeon_device *rdev) return radeon_gart_table_ram_alloc(rdev); } +/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ +void r100_enable_bm(struct radeon_device *rdev) +{ + uint32_t tmp; + /* Enable bus mastering */ + tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; + WREG32(RADEON_BUS_CNTL, tmp); +} + int r100_pci_gart_enable(struct radeon_device *rdev) { uint32_t tmp; @@ -105,9 +204,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev) WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; WREG32(RADEON_AIC_HI_ADDR, tmp); - /* Enable bus mastering */ - tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; - WREG32(RADEON_BUS_CNTL, tmp); /* set PCI GART page-table base address */ WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; @@ -148,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WREG32(R_000040_GEN_INT_CNTL, 0); + return -EINVAL; + } if (rdev->irq.sw_int) { tmp |= RADEON_SW_INT_ENABLE; } @@ -157,6 +258,12 @@ int r100_irq_set(struct radeon_device *rdev) if (rdev->irq.crtc_vblank_int[1]) { tmp |= RADEON_CRTC2_VBLANK_MASK; } + if (rdev->irq.hpd[0]) { + tmp |= RADEON_FP_DETECT_MASK; + } + if (rdev->irq.hpd[1]) { + tmp |= RADEON_FP2_DETECT_MASK; + } WREG32(RADEON_GEN_INT_CNTL, tmp); return 0; } @@ -175,8 +282,9 @@ void r100_irq_disable(struct radeon_device *rdev) static inline uint32_t r100_irq_ack(struct radeon_device *rdev) { uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); - uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | - RADEON_CRTC2_VBLANK_STAT; + uint32_t irq_mask = RADEON_SW_INT_TEST | + RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | + RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; if (irqs) { WREG32(RADEON_GEN_INT_STATUS, irqs); @@ -187,6 +295,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) int r100_irq_process(struct radeon_device *rdev) { uint32_t status, msi_rearm; + bool queue_hotplug = false; status = r100_irq_ack(rdev); if (!status) { @@ -207,8 +316,18 @@ int r100_irq_process(struct radeon_device *rdev) if (status & RADEON_CRTC2_VBLANK_STAT) { drm_handle_vblank(rdev->ddev, 1); } + if (status & RADEON_FP_DETECT_STAT) { + queue_hotplug = true; + DRM_DEBUG("HPD1\n"); + } + if (status & RADEON_FP2_DETECT_STAT) { + queue_hotplug = true; + DRM_DEBUG("HPD2\n"); + } status = r100_irq_ack(rdev); } + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS400: @@ -243,6 +362,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev, /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 16) | (1 << 17)); + radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | + RADEON_HDP_READ_BUFFER_INVALIDATE); + radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(rdev, rdev->config.r100.hdp_cntl); /* Emit fence sequence & fire IRQ */ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); radeon_ring_write(rdev, fence->seq); @@ -255,24 +379,27 @@ int r100_wb_init(struct radeon_device *rdev) int r; if (rdev->wb.wb_obj == NULL) { - r = 
radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, - true, - RADEON_GEM_DOMAIN_GTT, - false, &rdev->wb.wb_obj); + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, + RADEON_GEM_DOMAIN_GTT, + &rdev->wb.wb_obj); if (r) { - DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); + dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); return r; } - r = radeon_object_pin(rdev->wb.wb_obj, - RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); if (r) { - DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); + dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); + radeon_bo_unreserve(rdev->wb.wb_obj); return r; } - r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); if (r) { - DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); + dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); return r; } } @@ -290,11 +417,19 @@ void r100_wb_disable(struct radeon_device *rdev) void r100_wb_fini(struct radeon_device *rdev) { + int r; + r100_wb_disable(rdev); if (rdev->wb.wb_obj) { - radeon_object_kunmap(rdev->wb.wb_obj); - radeon_object_unpin(rdev->wb.wb_obj); - radeon_object_unref(&rdev->wb.wb_obj); + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + dev_err(rdev->dev, "(%d) can't finish WB\n", r); + return; + } + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + radeon_bo_unref(&rdev->wb.wb_obj); rdev->wb.wb = NULL; rdev->wb.wb_obj = NULL; } @@ -1250,7 +1385,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case RADEON_TXFORMAT_ARGB4444: case RADEON_TXFORMAT_VYUY422: case RADEON_TXFORMAT_YVYU422: - case RADEON_TXFORMAT_DXT1: case RADEON_TXFORMAT_SHADOW16: case RADEON_TXFORMAT_LDUDV655: case RADEON_TXFORMAT_DUDV88: @@ -1258,12 +1392,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p, break; case RADEON_TXFORMAT_ARGB8888: case RADEON_TXFORMAT_RGBA8888: - case RADEON_TXFORMAT_DXT23: - case RADEON_TXFORMAT_DXT45: case RADEON_TXFORMAT_SHADOW32: case RADEON_TXFORMAT_LDUDUV8888: track->textures[i].cpp = 4; break; + case RADEON_TXFORMAT_DXT1: + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT1; + break; + case RADEON_TXFORMAT_DXT23: + case RADEON_TXFORMAT_DXT45: + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT35; + break; } track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); @@ -1288,17 +1429,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, - struct radeon_object *robj) + struct radeon_bo *robj) { unsigned idx; u32 value; idx = pkt->idx + 1; value = radeon_get_ib_value(p, idx + 2); - if ((value + 1) > radeon_object_size(robj)) { + if ((value + 1) > radeon_bo_size(robj)) { DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " "(need %u have %lu) !\n", value + 1, - radeon_object_size(robj)); + radeon_bo_size(robj)); return -EINVAL; } return 0; @@ -1363,6 +1504,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL; } + track->vtx_size = 
r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); track->immd_dwords = pkt->count - 1; r = r100_cs_track_check(p->rdev, track); @@ -1650,6 +1792,17 @@ int r100_gpu_reset(struct radeon_device *rdev) return 0; } +void r100_set_common_regs(struct radeon_device *rdev) +{ + /* set these so they don't interfere with anything */ + WREG32(RADEON_OV0_SCALE_CNTL, 0); + WREG32(RADEON_SUBPIC_CNTL, 0); + WREG32(RADEON_VIPH_CONTROL, 0); + WREG32(RADEON_I2C_CNTL_1, 0); + WREG32(RADEON_DVI_I2C_CNTL_1, 0); + WREG32(RADEON_CAP0_TRIG_CNTL, 0); + WREG32(RADEON_CAP1_TRIG_CNTL, 0); +} /* * VRAM info @@ -2588,13 +2741,14 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) DRM_ERROR("coordinate type %d\n", t->tex_coord_type); DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); + DRM_ERROR("compress format %d\n", t->compress_format); } static int r100_cs_track_cube(struct radeon_device *rdev, struct r100_cs_track *track, unsigned idx) { unsigned face, w, h; - struct radeon_object *cube_robj; + struct radeon_bo *cube_robj; unsigned long size; for (face = 0; face < 5; face++) { @@ -2607,9 +2761,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev, size += track->textures[idx].cube_info[face].offset; - if (size > radeon_object_size(cube_robj)) { + if (size > radeon_bo_size(cube_robj)) { DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", - size, radeon_object_size(cube_robj)); + size, radeon_bo_size(cube_robj)); r100_cs_track_texture_print(&track->textures[idx]); return -1; } @@ -2617,10 +2771,40 @@ static int r100_cs_track_cube(struct radeon_device *rdev, return 0; } +static int r100_track_compress_size(int compress_format, int w, int h) +{ + int block_width, block_height, block_bytes; + int wblocks, hblocks; + int min_wblocks; + int sz; + + block_width = 4; + block_height = 4; + + switch (compress_format) { + case R100_TRACK_COMP_DXT1: + block_bytes = 8; + min_wblocks = 4; + break; + default: + case R100_TRACK_COMP_DXT35: + block_bytes = 16; + min_wblocks = 2; + break; + } + + hblocks = (h + block_height - 1) / block_height; + wblocks = (w + block_width - 1) / block_width; + if (wblocks < min_wblocks) + wblocks = min_wblocks; + sz = wblocks * hblocks * block_bytes; + return sz; +} + static int r100_cs_track_texture_check(struct radeon_device *rdev, struct r100_cs_track *track) { - struct radeon_object *robj; + struct radeon_bo *robj; unsigned long size; unsigned u, i, w, h; int ret; @@ -2654,9 +2838,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, h = h / (1 << i); if (track->textures[u].roundup_h) h = roundup_pow_of_two(h); - size += w * h; + if (track->textures[u].compress_format) { + + size += r100_track_compress_size(track->textures[u].compress_format, w, h); + /* compressed textures are block based */ + } else + size += w * h; } size *= track->textures[u].cpp; + switch (track->textures[u].tex_coord_type) { case 0: break; @@ -2676,9 +2866,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, "%u\n", track->textures[u].tex_coord_type, u); return -EINVAL; } - if (size > radeon_object_size(robj)) { + if (size > radeon_bo_size(robj)) { DRM_ERROR("Texture of unit %u needs %lu bytes but is " - "%lu\n", u, size, radeon_object_size(robj)); + "%lu\n", u, size, radeon_bo_size(robj)); r100_cs_track_texture_print(&track->textures[u]); return -EINVAL; } @@ -2695,15 +2885,19 @@ int r100_cs_track_check(struct 
radeon_device *rdev, struct r100_cs_track *track) for (i = 0; i < track->num_cb; i++) { if (track->cb[i].robj == NULL) { + if (!(track->fastfill || track->color_channel_mask || + track->blend_read_enable)) { + continue; + } DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); return -EINVAL; } size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; size += track->cb[i].offset; - if (size > radeon_object_size(track->cb[i].robj)) { + if (size > radeon_bo_size(track->cb[i].robj)) { DRM_ERROR("[drm] Buffer too small for color buffer %d " "(need %lu have %lu) !\n", i, size, - radeon_object_size(track->cb[i].robj)); + radeon_bo_size(track->cb[i].robj)); DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", i, track->cb[i].pitch, track->cb[i].cpp, track->cb[i].offset, track->maxy); @@ -2717,10 +2911,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) } size = track->zb.pitch * track->zb.cpp * track->maxy; size += track->zb.offset; - if (size > radeon_object_size(track->zb.robj)) { + if (size > radeon_bo_size(track->zb.robj)) { DRM_ERROR("[drm] Buffer too small for z buffer " "(need %lu have %lu) !\n", size, - radeon_object_size(track->zb.robj)); + radeon_bo_size(track->zb.robj)); DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", track->zb.pitch, track->zb.cpp, track->zb.offset, track->maxy); @@ -2738,11 +2932,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) "bound\n", prim_walk, i); return -EINVAL; } - if (size > radeon_object_size(track->arrays[i].robj)) { - DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " - "have %lu dwords\n", prim_walk, i, - size >> 2, - radeon_object_size(track->arrays[i].robj) >> 2); + if (size > radeon_bo_size(track->arrays[i].robj)) { + dev_err(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); DRM_ERROR("Max indices %u\n", track->max_indx); return -EINVAL; } @@ -2756,10 +2951,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) "bound\n", prim_walk, i); return -EINVAL; } - if (size > radeon_object_size(track->arrays[i].robj)) { - DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " - "have %lu dwords\n", prim_walk, i, size >> 2, - radeon_object_size(track->arrays[i].robj) >> 2); + if (size > radeon_bo_size(track->arrays[i].robj)) { + dev_err(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); return -EINVAL; } } @@ -2821,6 +3018,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track track->arrays[i].esize = 0x7F; } for (i = 0; i < track->num_texture; i++) { + track->textures[i].compress_format = R100_TRACK_COMP_NONE; track->textures[i].pitch = 16536; track->textures[i].width = 16536; track->textures[i].height = 16536; @@ -3101,6 +3299,9 @@ static int r100_startup(struct radeon_device *rdev) { int r; + /* set common regs */ + r100_set_common_regs(rdev); + /* program mc */ r100_mc_program(rdev); /* Resume clock */ r100_clock_startup(rdev); @@ -3108,14 +3309,15 @@ static int r100_startup(struct radeon_device *rdev) r100_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ + r100_enable_bm(rdev); if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_enable(rdev); if (r) return r; } /* Enable IRQ */ - rdev->irq.sw_int = true; r100_irq_set(rdev); + 
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -3150,6 +3352,8 @@ int r100_resume(struct radeon_device *rdev) radeon_combios_asic_init(rdev->ddev); /* Resume clock after posting */ r100_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return r100_startup(rdev); } @@ -3172,9 +3376,10 @@ void r100_fini(struct radeon_device *rdev) radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); + radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -3195,9 +3400,7 @@ int r100_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) { - printk(KERN_WARNING "[drm] Disabling AGP\n"); - rdev->flags &= ~RADEON_IS_AGP; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + radeon_agp_disable(rdev); } else { rdev->mc.gtt_location = rdev->mc.agp_base; } @@ -3242,14 +3445,14 @@ int r100_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; /* Set asic errata */ r100_errata(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); + /* Initialize power management */ + radeon_pm_init(rdev); /* Get vram informations */ r100_vram_info(rdev); /* Initialize memory controller (also test AGP) */ @@ -3264,7 +3467,7 @@ int r100_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; if (rdev->flags & RADEON_IS_PCI) { diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 0daf0d76a89..b27a6999d21 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h @@ -10,26 +10,30 @@ * CS functions */ struct r100_cs_track_cb { - struct radeon_object *robj; + struct radeon_bo *robj; unsigned pitch; unsigned cpp; unsigned offset; }; struct r100_cs_track_array { - struct radeon_object *robj; + struct radeon_bo *robj; unsigned esize; }; struct r100_cs_cube_info { - struct radeon_object *robj; - unsigned offset; + struct radeon_bo *robj; + unsigned offset; unsigned width; unsigned height; }; +#define R100_TRACK_COMP_NONE 0 +#define R100_TRACK_COMP_DXT1 1 +#define R100_TRACK_COMP_DXT35 2 + struct r100_cs_track_texture { - struct radeon_object *robj; + struct radeon_bo *robj; struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ unsigned pitch; unsigned width; @@ -44,6 +48,7 @@ struct r100_cs_track_texture { bool enabled; bool roundup_w; bool roundup_h; + unsigned compress_format; }; struct r100_cs_track_limits { @@ -62,13 +67,15 @@ struct r100_cs_track { unsigned immd_dwords; unsigned num_arrays; unsigned max_indx; + unsigned color_channel_mask; struct r100_cs_track_array arrays[11]; struct r100_cs_track_cb cb[R300_MAX_CB]; struct r100_cs_track_cb zb; struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; bool z_enabled; bool separate_cube; - + bool fastfill; + bool blend_read_enable; }; int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 
eb740fc3549..ff1e0cd608b 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, case 5: case 6: case 7: + /* 1D/2D */ track->textures[i].tex_coord_type = 0; break; case 1: - track->textures[i].tex_coord_type = 1; + /* CUBE */ + track->textures[i].tex_coord_type = 2; break; case 2: - track->textures[i].tex_coord_type = 2; + /* 3D */ + track->textures[i].tex_coord_type = 1; break; } break; @@ -401,7 +404,6 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_Y8: track->textures[i].cpp = 1; break; - case R200_TXFORMAT_DXT1: case R200_TXFORMAT_AI88: case R200_TXFORMAT_ARGB1555: case R200_TXFORMAT_RGB565: @@ -418,9 +420,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_TXFORMAT_ABGR8888: case R200_TXFORMAT_BGR111110: case R200_TXFORMAT_LDVDU8888: + track->textures[i].cpp = 4; + break; + case R200_TXFORMAT_DXT1: + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT1; + break; case R200_TXFORMAT_DXT23: case R200_TXFORMAT_DXT45: - track->textures[i].cpp = 4; + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT1; break; } track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 2f43ee8e404..0051d11b907 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -36,7 +36,15 @@ #include "rv350d.h" #include "r300_reg_safe.h" -/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ +/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 + * + * GPU Errata: + * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL + * using MMIO to flush host path read cache, this lead to HARDLOCKUP. + * However, scheduling such write to the ring seems harmless, i suspect + * the CP read collide with the flush somehow, or maybe the MC, hard to + * tell. 
(Jerome Glisse) + */ /* * rv370,rv380 PCIE GART @@ -137,14 +145,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) void rv370_pcie_gart_disable(struct radeon_device *rdev) { - uint32_t tmp; + u32 tmp; + int r; tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); if (rdev->gart.table.vram.robj) { - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } } } @@ -173,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev, /* Wait until IDLE & CLEAN */ radeon_ring_write(rdev, PACKET0(0x1720, 0)); radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); + radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | + RADEON_HDP_READ_BUFFER_INVALIDATE); + radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(rdev, rdev->config.r300.hdp_cntl); /* Emit fence sequence & fire IRQ */ radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); radeon_ring_write(rdev, fence->seq); @@ -681,7 +699,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p, r100_cs_dump_packet(p, pkt); return r; } - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_TXO_MACRO_TILE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_TXO_MICRO_TILE; + + tmp = idx_value + ((u32)reloc->lobj.gpu_offset); + tmp |= tile_flags; + ib[idx] = tmp; track->textures[i].robj = reloc->robj; break; /* Tracked registers */ @@ -847,7 +873,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_Z6Y5X5: case R300_TX_FORMAT_W4Z4Y4X4: case R300_TX_FORMAT_W1Z5Y5X5: - case R300_TX_FORMAT_DXT1: case R300_TX_FORMAT_D3DMFT_CxV8U8: case R300_TX_FORMAT_B8G8_B8G8: case R300_TX_FORMAT_G8R8_G8B8: @@ -861,8 +886,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case 0x17: case R300_TX_FORMAT_FL_I32: case 0x1e: - case R300_TX_FORMAT_DXT3: - case R300_TX_FORMAT_DXT5: track->textures[i].cpp = 4; break; case R300_TX_FORMAT_W16Z16Y16X16: @@ -873,6 +896,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case R300_TX_FORMAT_FL_R32G32B32A32: track->textures[i].cpp = 16; break; + case R300_TX_FORMAT_DXT1: + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT1; + break; + case R300_TX_FORMAT_ATI2N: + if (p->rdev->family < CHIP_R420) { + DRM_ERROR("Invalid texture format %u\n", + (idx_value & 0x1F)); + return -EINVAL; + } + /* The same rules apply as for DXT3/5. */ + /* Pass through. */ + case R300_TX_FORMAT_DXT3: + case R300_TX_FORMAT_DXT5: + track->textures[i].cpp = 1; + track->textures[i].compress_format = R100_TRACK_COMP_DXT35; + break; default: DRM_ERROR("Invalid texture format %u\n", (idx_value & 0x1F)); @@ -932,6 +972,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].width_11 = tmp; tmp = ((idx_value >> 16) & 1) << 11; track->textures[i].height_11 = tmp; + + /* ATI1N */ + if (idx_value & (1 << 14)) { + /* The same rules apply as for DXT1. 
*/ + track->textures[i].compress_format = + R100_TRACK_COMP_DXT1; + } + } else if (idx_value & (1 << 14)) { + DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); + return -EINVAL; } break; case 0x4480: @@ -973,6 +1023,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; + case 0x4e0c: + /* RB3D_COLOR_CHANNEL_MASK */ + track->color_channel_mask = idx_value; + break; + case 0x4d1c: + /* ZB_BW_CNTL */ + track->fastfill = !!(idx_value & (1 << 2)); + break; + case 0x4e04: + /* RB3D_BLENDCNTL */ + track->blend_read_enable = !!(idx_value & (1 << 2)); + break; case 0x4be8: /* valid register only on RV530 */ if (p->rdev->family == CHIP_RV530) @@ -1181,6 +1243,9 @@ static int r300_startup(struct radeon_device *rdev) { int r; + /* set common regs */ + r100_set_common_regs(rdev); + /* program mc */ r300_mc_program(rdev); /* Resume clock */ r300_clock_startup(rdev); @@ -1193,14 +1258,20 @@ static int r300_startup(struct radeon_device *rdev) if (r) return r; } + + if (rdev->family == CHIP_R300 || + rdev->family == CHIP_R350 || + rdev->family == CHIP_RV350) + r100_enable_bm(rdev); + if (rdev->flags & RADEON_IS_PCI) { r = r100_pci_gart_enable(rdev); if (r) return r; } /* Enable IRQ */ - rdev->irq.sw_int = true; r100_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -1237,6 +1308,8 @@ int r300_resume(struct radeon_device *rdev) radeon_combios_asic_init(rdev->ddev); /* Resume clock after posting */ r300_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return r300_startup(rdev); } @@ -1263,9 +1336,10 @@ void r300_fini(struct radeon_device *rdev) rv370_pcie_gart_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); + radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -1303,14 +1377,14 @@ int r300_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; /* Set asic errata */ r300_errata(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); + /* Initialize power management */ + radeon_pm_init(rdev); /* Get vram informations */ r300_vram_info(rdev); /* Initialize memory controller (also test AGP) */ @@ -1325,7 +1399,7 @@ int r300_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; if (rdev->flags & RADEON_IS_PCIE) { diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index cb2e470f97d..34bffa0e4b7 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, int sz; int addr; int type; - int clamp; + int isclamp; int stride; RING_LOCALS; @@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); - clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); + isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); addr |= (type << 16); - addr |= (clamp << 17); + addr |= (isclamp << 17); stride = type ? 4 : 6; diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 4b7afef35a6..1735a2b6958 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -900,6 +900,7 @@ # define R300_TX_FORMAT_FL_I32 0x1B # define R300_TX_FORMAT_FL_I32A32 0x1C # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D +# define R300_TX_FORMAT_ATI2N 0x1F /* alpha modes, convenience mostly */ /* if you have alpha, pick constant appropriate to the number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 1cefdbcc085..4526faaacca 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -30,7 +30,15 @@ #include "radeon_reg.h" #include "radeon.h" #include "atom.h" +#include "r100d.h" #include "r420d.h" +#include "r420_reg_safe.h" + +static void r420_set_reg_safe(struct radeon_device *rdev) +{ + rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; + rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); +} int r420_mc_init(struct radeon_device *rdev) { @@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) { - printk(KERN_WARNING "[drm] Disabling AGP\n"); - rdev->flags &= ~RADEON_IS_AGP; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + radeon_agp_disable(rdev); } else { rdev->mc.gtt_location = rdev->mc.agp_base; } @@ -165,10 +171,41 @@ static void r420_clock_resume(struct radeon_device *rdev) WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); } +static void r420_cp_errata_init(struct radeon_device *rdev) +{ + /* RV410 and R420 can lock up if CP DMA to host memory happens + * while the 2D engine is busy. + * + * The proper workaround is to queue a RESYNC at the beginning + * of the CP init, apparently. 
+ */ + radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); + radeon_ring_lock(rdev, 8); + radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1)); + radeon_ring_write(rdev, rdev->config.r300.resync_scratch); + radeon_ring_write(rdev, 0xDEADBEEF); + radeon_ring_unlock_commit(rdev); +} + +static void r420_cp_errata_fini(struct radeon_device *rdev) +{ + /* Catch the RESYNC we dispatched all the way back, + * at the very beginning of the CP init. + */ + radeon_ring_lock(rdev, 8); + radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, R300_RB3D_DC_FINISH); + radeon_ring_unlock_commit(rdev); + radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); +} + static int r420_startup(struct radeon_device *rdev) { int r; + /* set common regs */ + r100_set_common_regs(rdev); + /* program mc */ r300_mc_program(rdev); /* Resume clock */ r420_clock_resume(rdev); @@ -186,14 +223,15 @@ static int r420_startup(struct radeon_device *rdev) } r420_pipes_init(rdev); /* Enable IRQ */ - rdev->irq.sw_int = true; r100_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } + r420_cp_errata_init(rdev); r = r100_wb_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing WB (%d).\n", r); @@ -229,12 +267,14 @@ int r420_resume(struct radeon_device *rdev) } /* Resume clock after posting */ r420_clock_resume(rdev); - + /* Initialize surface registers */ + radeon_surface_init(rdev); return r420_startup(rdev); } int r420_suspend(struct radeon_device *rdev) { + r420_cp_errata_fini(rdev); r100_cp_disable(rdev); r100_wb_disable(rdev); r100_irq_disable(rdev); @@ -258,7 +298,7 @@ void r420_fini(struct radeon_device *rdev) radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); if (rdev->is_atom_bios) { radeon_atombios_fini(rdev); } else { @@ -301,14 +341,9 @@ int r420_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - if (rdev->is_atom_bios) { - atom_asic_init(rdev->mode_info.atom_context); - } else { - radeon_combios_asic_init(rdev->ddev); - } - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; + /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* Initialize power management */ @@ -331,10 +366,13 @@ int r420_init(struct radeon_device *rdev) return r; } /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) { return r; } + if (rdev->family == CHIP_R420) + r100_enable_bm(rdev); + if (rdev->flags & RADEON_IS_PCIE) { r = rv370_pcie_gart_init(rdev); if (r) @@ -345,7 +383,7 @@ int r420_init(struct radeon_device *rdev) if (r) return r; } - r300_set_reg_safe(rdev); + r420_set_reg_safe(rdev); rdev->accel_working = true; r = r420_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 7baa7395556..74ad89bdf2b 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -716,6 +716,8 @@ #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 +#define AVIVO_DC_GPIO_HPD_A 0x7e94 + #define AVIVO_GPIO_0 0x7e30 #define AVIVO_GPIO_1 0x7e40 #define AVIVO_GPIO_2 0x7e50 diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index f7435185c0a..9a189072f2b 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -185,8 +185,8 @@ static int r520_startup(struct radeon_device *rdev) return r; } /* Enable IRQ */ - rdev->irq.sw_int = true; rs600_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -221,6 +221,8 @@ int r520_resume(struct radeon_device *rdev) atom_asic_init(rdev->mode_info.atom_context); /* Resume clock after posting */ rv515_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return r520_startup(rdev); } @@ -254,6 +256,9 @@ int r520_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; + if (!radeon_card_posted(rdev) && rdev->bios) { DRM_INFO("GPU not posted. 
posting now...\n"); atom_asic_init(rdev->mode_info.atom_context); @@ -277,7 +282,7 @@ int r520_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; r = rv370_pcie_gart_init(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 6740ed24358..1b6d0001b20 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -38,8 +38,10 @@ #define PFP_UCODE_SIZE 576 #define PM4_UCODE_SIZE 1792 +#define RLC_UCODE_SIZE 768 #define R700_PFP_UCODE_SIZE 848 #define R700_PM4_UCODE_SIZE 1360 +#define R700_RLC_UCODE_SIZE 1024 /* Firmware Names */ MODULE_FIRMWARE("radeon/R600_pfp.bin"); @@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin"); MODULE_FIRMWARE("radeon/RV730_me.bin"); MODULE_FIRMWARE("radeon/RV710_pfp.bin"); MODULE_FIRMWARE("radeon/RV710_me.bin"); +MODULE_FIRMWARE("radeon/R600_rlc.bin"); +MODULE_FIRMWARE("radeon/R700_rlc.bin"); int r600_debugfs_mc_info_init(struct radeon_device *rdev); @@ -70,6 +74,282 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); +/* hpd for digital panel detect/disconnect */ +bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) +{ + bool connected = false; + + if (ASIC_IS_DCE3(rdev)) { + switch (hpd) { + case RADEON_HPD_1: + if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_2: + if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_3: + if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_4: + if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + /* DCE 3.2 */ + case RADEON_HPD_5: + if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case RADEON_HPD_6: + if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + default: + break; + } + } else { + switch (hpd) { + case RADEON_HPD_1: + if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) + connected = true; + break; + case RADEON_HPD_2: + if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) + connected = true; + break; + case RADEON_HPD_3: + if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) + connected = true; + break; + default: + break; + } + } + return connected; +} + +void r600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd) +{ + u32 tmp; + bool connected = r600_hpd_sense(rdev, hpd); + + if (ASIC_IS_DCE3(rdev)) { + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(DC_HPD1_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(DC_HPD2_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + break; + case RADEON_HPD_3: + tmp = RREG32(DC_HPD3_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + break; + case RADEON_HPD_4: + tmp = RREG32(DC_HPD4_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + break; + case RADEON_HPD_5: + tmp = RREG32(DC_HPD5_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; 
+ else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + break; + /* DCE 3.2 */ + case RADEON_HPD_6: + tmp = RREG32(DC_HPD6_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + break; + default: + break; + } + } else { + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); + if (connected) + tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; + else + tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); + if (connected) + tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; + else + tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); + break; + case RADEON_HPD_3: + tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); + if (connected) + tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; + else + tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); + break; + default: + break; + } + } +} + +void r600_hpd_init(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + if (ASIC_IS_DCE3(rdev)) { + u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); + if (ASIC_IS_DCE32(rdev)) + tmp |= DC_HPDx_EN; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, tmp); + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, tmp); + rdev->irq.hpd[1] = true; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, tmp); + rdev->irq.hpd[2] = true; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, tmp); + rdev->irq.hpd[3] = true; + break; + /* DCE 3.2 */ + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, tmp); + rdev->irq.hpd[4] = true; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, tmp); + rdev->irq.hpd[5] = true; + break; + default: + break; + } + } + } else { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); + rdev->irq.hpd[1] = true; + break; + case RADEON_HPD_3: + WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); + rdev->irq.hpd[2] = true; + break; + default: + break; + } + } + } + if (rdev->irq.installed) + r600_irq_set(rdev); +} + +void r600_hpd_fini(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + if (ASIC_IS_DCE3(rdev)) { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HPD1_CONTROL, 0); + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: + WREG32(DC_HPD2_CONTROL, 0); + rdev->irq.hpd[1] = false; + break; + case RADEON_HPD_3: + WREG32(DC_HPD3_CONTROL, 0); + rdev->irq.hpd[2] = false; + break; + case RADEON_HPD_4: + WREG32(DC_HPD4_CONTROL, 0); + rdev->irq.hpd[3] = false; + break; + /* DCE 3.2 */ + case RADEON_HPD_5: + WREG32(DC_HPD5_CONTROL, 0); + 
rdev->irq.hpd[4] = false; + break; + case RADEON_HPD_6: + WREG32(DC_HPD6_CONTROL, 0); + rdev->irq.hpd[5] = false; + break; + default: + break; + } + } + } else { + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: + WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); + rdev->irq.hpd[1] = false; + break; + case RADEON_HPD_3: + WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); + rdev->irq.hpd[2] = false; + break; + default: + break; + } + } + } +} + /* * R600 PCIE GART */ @@ -180,7 +460,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) void r600_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i; + int i, r; /* Disable all tables */ for (i = 0; i < 7; i++) @@ -208,8 +488,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); if (rdev->gart.table.vram.robj) { - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } } } @@ -340,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev) fixed20_12 a; u32 tmp; int chansize, numchan; - int r; /* Get VRAM informations */ rdev->mc.vram_is_ddr = true; @@ -383,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = rdev->mc.aper_size; if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) - return r; /* gtt_size is setup by radeon_agp_init */ rdev->mc.gtt_location = rdev->mc.agp_base; tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; @@ -443,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev) a.full = rfixed_const(100); rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + + if (rdev->flags & RADEON_IS_IGP) + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + return 0; } @@ -1101,7 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) (void)RREG32(PCIE_PORT_DATA); } - /* * CP & Ring */ @@ -1110,11 +1393,12 @@ void r600_cp_stop(struct radeon_device *rdev) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); } -int r600_cp_init_microcode(struct radeon_device *rdev) +int r600_init_microcode(struct radeon_device *rdev) { struct platform_device *pdev; const char *chip_name; - size_t pfp_req_size, me_req_size; + const char *rlc_chip_name; + size_t pfp_req_size, me_req_size, rlc_req_size; char fw_name[30]; int err; @@ -1128,30 +1412,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev) } switch (rdev->family) { - case CHIP_R600: chip_name = "R600"; break; - case CHIP_RV610: chip_name = "RV610"; break; - case CHIP_RV630: chip_name = "RV630"; break; - case CHIP_RV620: chip_name = "RV620"; break; - case CHIP_RV635: chip_name = "RV635"; break; - case CHIP_RV670: chip_name = "RV670"; break; + case CHIP_R600: + chip_name = "R600"; + rlc_chip_name = "R600"; + break; + case CHIP_RV610: + chip_name = "RV610"; + rlc_chip_name = "R600"; + break; + case CHIP_RV630: + chip_name = "RV630"; + rlc_chip_name = "R600"; + break; + case CHIP_RV620: + chip_name = "RV620"; + rlc_chip_name 
= "R600"; + break; + case CHIP_RV635: + chip_name = "RV635"; + rlc_chip_name = "R600"; + break; + case CHIP_RV670: + chip_name = "RV670"; + rlc_chip_name = "R600"; + break; case CHIP_RS780: - case CHIP_RS880: chip_name = "RS780"; break; - case CHIP_RV770: chip_name = "RV770"; break; + case CHIP_RS880: + chip_name = "RS780"; + rlc_chip_name = "R600"; + break; + case CHIP_RV770: + chip_name = "RV770"; + rlc_chip_name = "R700"; + break; case CHIP_RV730: - case CHIP_RV740: chip_name = "RV730"; break; - case CHIP_RV710: chip_name = "RV710"; break; + case CHIP_RV740: + chip_name = "RV730"; + rlc_chip_name = "R700"; + break; + case CHIP_RV710: + chip_name = "RV710"; + rlc_chip_name = "R700"; + break; default: BUG(); } if (rdev->family >= CHIP_RV770) { pfp_req_size = R700_PFP_UCODE_SIZE * 4; me_req_size = R700_PM4_UCODE_SIZE * 4; + rlc_req_size = R700_RLC_UCODE_SIZE * 4; } else { pfp_req_size = PFP_UCODE_SIZE * 4; me_req_size = PM4_UCODE_SIZE * 12; + rlc_req_size = RLC_UCODE_SIZE * 4; } - DRM_INFO("Loading %s CP Microcode\n", chip_name); + DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); @@ -1175,6 +1491,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev) rdev->me_fw->size, fw_name); err = -EINVAL; } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + if (err) + goto out; + if (rdev->rlc_fw->size != rlc_req_size) { + printk(KERN_ERR + "r600_rlc: Bogus length %zu in firmware \"%s\"\n", + rdev->rlc_fw->size, fw_name); + err = -EINVAL; + } + out: platform_device_unregister(pdev); @@ -1187,6 +1515,8 @@ out: rdev->pfp_fw = NULL; release_firmware(rdev->me_fw); rdev->me_fw = NULL; + release_firmware(rdev->rlc_fw); + rdev->rlc_fw = NULL; } return err; } @@ -1381,10 +1711,16 @@ int r600_ring_test(struct radeon_device *rdev) void r600_wb_disable(struct radeon_device *rdev) { + int r; + WREG32(SCRATCH_UMSK, 0); if (rdev->wb.wb_obj) { - radeon_object_kunmap(rdev->wb.wb_obj); - radeon_object_unpin(rdev->wb.wb_obj); + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return; + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); } } @@ -1392,7 +1728,7 @@ void r600_wb_fini(struct radeon_device *rdev) { r600_wb_disable(rdev); if (rdev->wb.wb_obj) { - radeon_object_unref(&rdev->wb.wb_obj); + radeon_bo_unref(&rdev->wb.wb_obj); rdev->wb.wb = NULL; rdev->wb.wb_obj = NULL; } @@ -1403,22 +1739,29 @@ int r600_wb_enable(struct radeon_device *rdev) int r; if (rdev->wb.wb_obj == NULL) { - r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, + RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); if (r) { - dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); + dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); + return r; + } + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + r600_wb_fini(rdev); return r; } - r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, &rdev->wb.gpu_addr); if (r) { - dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); + radeon_bo_unreserve(rdev->wb.wb_obj); + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); r600_wb_fini(rdev); return r; } - r = 
radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); if (r) { - dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); r600_wb_fini(rdev); return r; } @@ -1433,41 +1776,36 @@ int r600_wb_enable(struct radeon_device *rdev) void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { + /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ /* Emit fence sequence & fire IRQ */ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); radeon_ring_write(rdev, fence->seq); -} - -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence) -{ - /* FIXME: implement */ - return 0; + radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + radeon_ring_write(rdev, 1); + /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ + radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); + radeon_ring_write(rdev, RB_INT_STAT); } int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence) { - r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + int r; + + mutex_lock(&rdev->r600_blit.mutex); + rdev->r600_blit.vb_ib = NULL; + r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + if (r) { + if (rdev->r600_blit.vb_ib) + radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); + mutex_unlock(&rdev->r600_blit.mutex); + return r; + } r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); r600_blit_done_copy(rdev, fence); - return 0; -} - -int r600_irq_process(struct radeon_device *rdev) -{ - /* FIXME: implement */ - return 0; -} - -int r600_irq_set(struct radeon_device *rdev) -{ - /* FIXME: implement */ + mutex_unlock(&rdev->r600_blit.mutex); return 0; } @@ -1506,6 +1844,14 @@ int r600_startup(struct radeon_device *rdev) { int r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + r600_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); @@ -1515,13 +1861,27 @@ int r600_startup(struct radeon_device *rdev) return r; } r600_gpu_init(rdev); - - r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); + /* pin copy shader into vram */ + if (rdev->r600_blit.shader_obj) { + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } + } + /* Enable IRQ */ + r = r600_irq_init(rdev); if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); + DRM_ERROR("radeon: IH init failed (%d).\n", r); + radeon_irq_kms_fini(rdev); return r; } + r600_irq_set(rdev); r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) @@ -1583,13 +1943,22 @@ int r600_resume(struct radeon_device *rdev) int r600_suspend(struct radeon_device *rdev) { + int r; + /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->cp.ready = false; + 
r600_irq_suspend(rdev); r600_wb_disable(rdev); r600_pcie_gart_disable(rdev); /* unpin shaders bo */ - radeon_object_unpin(rdev->r600_blit.shader_obj); + if (rdev->r600_blit.shader_obj) { + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (!r) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } + } return 0; } @@ -1627,7 +1996,11 @@ int r600_init(struct radeon_device *rdev) if (r) return r; /* Post card if necessary */ - if (!r600_card_posted(rdev) && rdev->bios) { + if (!r600_card_posted(rdev)) { + if (!rdev->bios) { + dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); + return -EINVAL; + } DRM_INFO("GPU not posted. posting now...\n"); atom_asic_init(rdev->mode_info.atom_context); } @@ -1646,35 +2019,40 @@ int r600_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) + radeon_agp_disable(rdev); + } r = r600_mc_init(rdev); if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; + + r = radeon_irq_kms_init(rdev); + if (r) + return r; + rdev->cp.ring_obj = NULL; r600_ring_init(rdev, 1024 * 1024); - if (!rdev->me_fw || !rdev->pfp_fw) { - r = r600_cp_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } + rdev->ih.ring_obj = NULL; + r600_ih_ring_init(rdev, 64 * 1024); r = r600_pcie_gart_init(rdev); if (r) return r; - - rdev->accel_working = true; r = r600_blit_init(rdev); if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - return r; + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + rdev->accel_working = true; r = r600_startup(rdev); if (r) { r600_suspend(rdev); @@ -1686,15 +2064,20 @@ int r600_init(struct radeon_device *rdev) if (rdev->accel_working) { r = radeon_ib_pool_init(rdev); if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - rdev->accel_working = false; - } - r = r600_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); rdev->accel_working = false; + } else { + r = r600_ib_test(rdev); + if (r) { + dev_err(rdev->dev, "IB test failed (%d).\n", r); + rdev->accel_working = false; + } } } + + r = r600_audio_init(rdev); + if (r) + return r; /* TODO error handling */ return 0; } @@ -1703,16 +2086,18 @@ void r600_fini(struct radeon_device *rdev) /* Suspend operations */ r600_suspend(rdev); + r600_audio_fini(rdev); r600_blit_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); radeon_ring_fini(rdev); r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); - if (rdev->flags & RADEON_IS_AGP) - radeon_agp_fini(rdev); - radeon_object_fini(rdev); + radeon_agp_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -1798,8 +2183,668 @@ int r600_ib_test(struct radeon_device *rdev) return r; } +/* + * Interrupts + * + * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty + * the same as the CP ring buffer, but in reverse. Rather than the CPU + * writing to the ring and the GPU consuming, the GPU writes to the ring + * and host consumes. As the host irq handler processes interrupts, it + * increments the rptr. 
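+ * (In the code below, the wptr is read from IH_RB_WPTR by r600_get_ih_wptr() and the updated rptr is written back to IH_RB_RPTR at the end of r600_irq_process().)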
When the rptr catches up with the wptr, all the + * current interrupts have been processed. + */ + +void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) +{ + u32 rb_bufsz; + + /* Align ring size */ + rb_bufsz = drm_order(ring_size / 4); + ring_size = (1 << rb_bufsz) * 4; + rdev->ih.ring_size = ring_size; + rdev->ih.ptr_mask = rdev->ih.ring_size - 1; + rdev->ih.rptr = 0; +} + +static int r600_ih_ring_alloc(struct radeon_device *rdev) +{ + int r; + + /* Allocate ring buffer */ + if (rdev->ih.ring_obj == NULL) { + r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, + true, + RADEON_GEM_DOMAIN_GTT, + &rdev->ih.ring_obj); + if (r) { + DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); + return r; + } + r = radeon_bo_reserve(rdev->ih.ring_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->ih.ring_obj, + RADEON_GEM_DOMAIN_GTT, + &rdev->ih.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->ih.ring_obj); + DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); + return r; + } + r = radeon_bo_kmap(rdev->ih.ring_obj, + (void **)&rdev->ih.ring); + radeon_bo_unreserve(rdev->ih.ring_obj); + if (r) { + DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); + return r; + } + } + return 0; +} + +static void r600_ih_ring_fini(struct radeon_device *rdev) +{ + int r; + if (rdev->ih.ring_obj) { + r = radeon_bo_reserve(rdev->ih.ring_obj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->ih.ring_obj); + radeon_bo_unpin(rdev->ih.ring_obj); + radeon_bo_unreserve(rdev->ih.ring_obj); + } + radeon_bo_unref(&rdev->ih.ring_obj); + rdev->ih.ring = NULL; + rdev->ih.ring_obj = NULL; + } +} + +static void r600_rlc_stop(struct radeon_device *rdev) +{ + if (rdev->family >= CHIP_RV770) { + /* r7xx asics need to soft reset RLC before halting */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); + RREG32(SRBM_SOFT_RESET); + udelay(15000); + WREG32(SRBM_SOFT_RESET, 0); + RREG32(SRBM_SOFT_RESET); + } + + WREG32(RLC_CNTL, 0); +} +static void r600_rlc_start(struct radeon_device *rdev) +{ + WREG32(RLC_CNTL, RLC_ENABLE); +} + +static int r600_rlc_init(struct radeon_device *rdev) +{ + u32 i; + const __be32 *fw_data; + + if (!rdev->rlc_fw) + return -EINVAL; + + r600_rlc_stop(rdev); + + WREG32(RLC_HB_BASE, 0); + WREG32(RLC_HB_CNTL, 0); + WREG32(RLC_HB_RPTR, 0); + WREG32(RLC_HB_WPTR, 0); + WREG32(RLC_HB_WPTR_LSB_ADDR, 0); + WREG32(RLC_HB_WPTR_MSB_ADDR, 0); + WREG32(RLC_MC_CNTL, 0); + WREG32(RLC_UCODE_CNTL, 0); + + fw_data = (const __be32 *)rdev->rlc_fw->data; + if (rdev->family >= CHIP_RV770) { + for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } else { + for (i = 0; i < RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } + WREG32(RLC_UCODE_ADDR, 0); + + r600_rlc_start(rdev); + + return 0; +} + +static void r600_enable_interrupts(struct radeon_device *rdev) +{ + u32 ih_cntl = RREG32(IH_CNTL); + u32 ih_rb_cntl = RREG32(IH_RB_CNTL); + + ih_cntl |= ENABLE_INTR; + ih_rb_cntl |= IH_RB_ENABLE; + WREG32(IH_CNTL, ih_cntl); + WREG32(IH_RB_CNTL, ih_rb_cntl); + rdev->ih.enabled = true; +} + +static void r600_disable_interrupts(struct radeon_device *rdev) +{ + u32 ih_rb_cntl = RREG32(IH_RB_CNTL); + u32 ih_cntl = RREG32(IH_CNTL); + + ih_rb_cntl &= ~IH_RB_ENABLE; + ih_cntl &= ~ENABLE_INTR; + WREG32(IH_RB_CNTL, ih_rb_cntl); + WREG32(IH_CNTL, ih_cntl); + /* set rptr, wptr to 0 */ + WREG32(IH_RB_RPTR, 0); + WREG32(IH_RB_WPTR, 0); + rdev->ih.enabled 
= false; + rdev->ih.wptr = 0; + rdev->ih.rptr = 0; +} + +static void r600_disable_interrupt_state(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(CP_INT_CNTL, 0); + WREG32(GRBM_INT_CNTL, 0); + WREG32(DxMODE_INT_MASK, 0); + if (ASIC_IS_DCE3(rdev)) { + WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + if (ASIC_IS_DCE32(rdev)) { + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, 0); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, 0); + } + } else { + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DACB_AUTODETECT_INT_CONTROL, 0); + tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); + tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); + tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); + } +} + +int r600_irq_init(struct radeon_device *rdev) +{ + int ret = 0; + int rb_bufsz; + u32 interrupt_cntl, ih_cntl, ih_rb_cntl; + + /* allocate ring */ + ret = r600_ih_ring_alloc(rdev); + if (ret) + return ret; + + /* disable irqs */ + r600_disable_interrupts(rdev); + + /* init rlc */ + ret = r600_rlc_init(rdev); + if (ret) { + r600_ih_ring_fini(rdev); + return ret; + } + + /* setup interrupt control */ + /* set dummy read address to ring address */ + WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + interrupt_cntl = RREG32(INTERRUPT_CNTL); + /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi + * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; + /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; + WREG32(INTERRUPT_CNTL, interrupt_cntl); + + WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); + rb_bufsz = drm_order(rdev->ih.ring_size / 4); + + ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | + IH_WPTR_OVERFLOW_CLEAR | + (rb_bufsz << 1)); + /* WPTR writeback, not yet */ + /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ + WREG32(IH_RB_WPTR_ADDR_LO, 0); + WREG32(IH_RB_WPTR_ADDR_HI, 0); + + WREG32(IH_RB_CNTL, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32(IH_RB_RPTR, 0); + WREG32(IH_RB_WPTR, 0); + + /* Default settings for IH_CNTL (disabled at first) */ + ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); + /* RPTR_REARM only works if msi's are enabled */ + if (rdev->msi_enabled) + ih_cntl |= RPTR_REARM; + +#ifdef __BIG_ENDIAN + ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT); +#endif + WREG32(IH_CNTL, ih_cntl); + + /* force the active interrupt state to all disabled */ + r600_disable_interrupt_state(rdev); + + /* enable irqs */ + r600_enable_interrupts(rdev); + + return ret; +} + +void r600_irq_suspend(struct radeon_device *rdev) +{ + r600_disable_interrupts(rdev); + r600_rlc_stop(rdev); +} + +void r600_irq_fini(struct radeon_device *rdev) +{ + r600_irq_suspend(rdev); + r600_ih_ring_fini(rdev); +} + +int r600_irq_set(struct radeon_device 
*rdev) +{ + u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 mode_int = 0; + u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + return -EINVAL; + } + /* don't enable anything if the ih is disabled */ + if (!rdev->ih.enabled) { + r600_disable_interrupts(rdev); + /* force the active interrupt state to all disabled */ + r600_disable_interrupt_state(rdev); + return 0; + } + + if (ASIC_IS_DCE3(rdev)) { + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; + if (ASIC_IS_DCE32(rdev)) { + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + } + } else { + hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; + } + + if (rdev->irq.sw_int) { + DRM_DEBUG("r600_irq_set: sw int\n"); + cp_int_cntl |= RB_INT_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0]) { + DRM_DEBUG("r600_irq_set: vblank 0\n"); + mode_int |= D1MODE_VBLANK_INT_MASK; + } + if (rdev->irq.crtc_vblank_int[1]) { + DRM_DEBUG("r600_irq_set: vblank 1\n"); + mode_int |= D2MODE_VBLANK_INT_MASK; + } + if (rdev->irq.hpd[0]) { + DRM_DEBUG("r600_irq_set: hpd 1\n"); + hpd1 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[1]) { + DRM_DEBUG("r600_irq_set: hpd 2\n"); + hpd2 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[2]) { + DRM_DEBUG("r600_irq_set: hpd 3\n"); + hpd3 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[3]) { + DRM_DEBUG("r600_irq_set: hpd 4\n"); + hpd4 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[4]) { + DRM_DEBUG("r600_irq_set: hpd 5\n"); + hpd5 |= DC_HPDx_INT_EN; + } + if (rdev->irq.hpd[5]) { + DRM_DEBUG("r600_irq_set: hpd 6\n"); + hpd6 |= DC_HPDx_INT_EN; + } + + WREG32(CP_INT_CNTL, cp_int_cntl); + WREG32(DxMODE_INT_MASK, mode_int); + if (ASIC_IS_DCE3(rdev)) { + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + if (ASIC_IS_DCE32(rdev)) { + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + } + } else { + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); + } + + return 0; +} + +static inline void r600_irq_ack(struct radeon_device *rdev, + u32 *disp_int, + u32 *disp_int_cont, + u32 *disp_int_cont2) +{ + u32 tmp; + + if (ASIC_IS_DCE3(rdev)) { + *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); + *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); + *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); + } else { + *disp_int = RREG32(DISP_INTERRUPT_STATUS); + *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + *disp_int_cont2 = 0; + } + + if (*disp_int & LB_D1_VBLANK_INTERRUPT) + WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); + if (*disp_int & LB_D1_VLINE_INTERRUPT) + WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); + if (*disp_int & LB_D2_VBLANK_INTERRUPT) + WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); + if (*disp_int & LB_D2_VLINE_INTERRUPT) + WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); + if (*disp_int & DC_HPD1_INTERRUPT) { + if (ASIC_IS_DCE3(rdev)) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + 
WREG32(DC_HPD1_INT_CONTROL, tmp); + } else { + tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); + } + } + if (*disp_int & DC_HPD2_INTERRUPT) { + if (ASIC_IS_DCE3(rdev)) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } else { + tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); + } + } + if (*disp_int_cont & DC_HPD3_INTERRUPT) { + if (ASIC_IS_DCE3(rdev)) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } else { + tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); + } + } + if (*disp_int_cont & DC_HPD4_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (ASIC_IS_DCE32(rdev)) { + if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } + } +} + +void r600_irq_disable(struct radeon_device *rdev) +{ + u32 disp_int, disp_int_cont, disp_int_cont2; + + r600_disable_interrupts(rdev); + /* Wait and acknowledge irq */ + mdelay(1); + r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); + r600_disable_interrupt_state(rdev); +} + +static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) +{ + u32 wptr, tmp; + + /* XXX use writeback */ + wptr = RREG32(IH_RB_WPTR); + + if (wptr & RB_OVERFLOW) { + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
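+ * Each IV ring entry is 16 bytes, so wptr + 16 skips exactly the one vector that is being overwritten.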
+ */ + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", + wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; + tmp = RREG32(IH_RB_CNTL); + tmp |= IH_WPTR_OVERFLOW_CLEAR; + WREG32(IH_RB_CNTL, tmp); + } + return (wptr & rdev->ih.ptr_mask); +} + +/* r600 IV Ring + * Each IV ring entry is 128 bits: + * [7:0] - interrupt source id + * [31:8] - reserved + * [59:32] - interrupt source data + * [127:60] - reserved + * + * The basic interrupt vector entries + * are decoded as follows: + * src_id src_data description + * 1 0 D1 Vblank + * 1 1 D1 Vline + * 5 0 D2 Vblank + * 5 1 D2 Vline + * 19 0 FP Hot plug detection A + * 19 1 FP Hot plug detection B + * 19 2 DAC A auto-detection + * 19 3 DAC B auto-detection + * 176 - CP_INT RB + * 177 - CP_INT IB1 + * 178 - CP_INT IB2 + * 181 - EOP Interrupt + * 233 - GUI Idle + * + * Note, these are based on r600 and may need to be + * adjusted or added to on newer asics + */ + +int r600_irq_process(struct radeon_device *rdev) +{ + u32 wptr = r600_get_ih_wptr(rdev); + u32 rptr = rdev->ih.rptr; + u32 src_id, src_data; + u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; + unsigned long flags; + bool queue_hotplug = false; + + DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + if (!rdev->ih.enabled) + return IRQ_NONE; + + spin_lock_irqsave(&rdev->ih.lock, flags); + + if (rptr == wptr) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + if (rdev->shutdown) { + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_NONE; + } + +restart_ih: + /* display interrupts */ + r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); + + rdev->ih.wptr = wptr; + while (rptr != wptr) { + /* wptr/rptr are in bytes! 
*/ + ring_index = rptr / 4; + src_id = rdev->ih.ring[ring_index] & 0xff; + src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; + + switch (src_id) { + case 1: /* D1 vblank/vline */ + switch (src_data) { + case 0: /* D1 vblank */ + if (disp_int & LB_D1_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 0); + disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + } + break; + case 1: /* D1 vline */ + if (disp_int & LB_D1_VLINE_INTERRUPT) { + disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 5: /* D2 vblank/vline */ + switch (src_data) { + case 0: /* D2 vblank */ + if (disp_int & LB_D2_VBLANK_INTERRUPT) { + drm_handle_vblank(rdev->ddev, 1); + disp_int &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + } + break; + case 1: /* D1 vline */ + if (disp_int & LB_D2_VLINE_INTERRUPT) { + disp_int &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 19: /* HPD/DAC hotplug */ + switch (src_data) { + case 0: + if (disp_int & DC_HPD1_INTERRUPT) { + disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + } + break; + case 1: + if (disp_int & DC_HPD2_INTERRUPT) { + disp_int &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + } + break; + case 4: + if (disp_int_cont & DC_HPD3_INTERRUPT) { + disp_int_cont &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + } + break; + case 5: + if (disp_int_cont & DC_HPD4_INTERRUPT) { + disp_int_cont &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + } + break; + case 10: + if (disp_int_cont2 & DC_HPD5_INTERRUPT) { + disp_int_cont &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + } + break; + case 12: + if (disp_int_cont2 & DC_HPD6_INTERRUPT) { + disp_int_cont &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + break; + case 176: /* CP_INT in ring buffer */ + case 177: /* CP_INT in IB1 */ + case 178: /* CP_INT in IB2 */ + DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); + radeon_fence_process(rdev); + break; + case 181: /* CP EOP event */ + DRM_DEBUG("IH: CP EOP\n"); + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } + + /* wptr/rptr are in bytes! 
*/ + rptr += 16; + rptr &= rdev->ih.ptr_mask; + } + /* make sure wptr hasn't changed while processing */ + wptr = r600_get_ih_wptr(rdev); + if (wptr != rdev->ih.wptr) + goto restart_ih; + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); + rdev->ih.rptr = rptr; + WREG32(IH_RB_RPTR, rdev->ih.rptr); + spin_unlock_irqrestore(&rdev->ih.lock, flags); + return IRQ_HANDLED; +} /* * Debugfs info @@ -1811,21 +2856,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - uint32_t rdp, wdp; unsigned count, i, j; radeon_ring_free_size(rdev); - rdp = RREG32(CP_RB_RPTR); - wdp = RREG32(CP_RB_WPTR); - count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask; + count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw; seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); - seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); - seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); + seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); + seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); + seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr); + seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr); seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); seq_printf(m, "%u dwords in ring\n", count); + i = rdev->cp.rptr; for (j = 0; j <= count; j++) { - i = (rdp + j) & rdev->cp.ptr_mask; seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); + i = (i + 1) & rdev->cp.ptr_mask; } return 0; } diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c new file mode 100644 index 00000000000..99e2c3891a7 --- /dev/null +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -0,0 +1,267 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Christian König. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ +#include "drmP.h" +#include "radeon.h" +#include "radeon_reg.h" +#include "atom.h" + +#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */ + +/* + * check if the chipset is supported + */ +static int r600_audio_chipset_supported(struct radeon_device *rdev) +{ + return rdev->family >= CHIP_R600 + || rdev->family == CHIP_RS600 + || rdev->family == CHIP_RS690 + || rdev->family == CHIP_RS740; +} + +/* + * current number of channels + */ +static int r600_audio_channels(struct radeon_device *rdev) +{ + return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1; +} + +/* + * current bits per sample + */ +static int r600_audio_bits_per_sample(struct radeon_device *rdev) +{ + uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4; + switch (value) { + case 0x0: return 8; + case 0x1: return 16; + case 0x2: return 20; + case 0x3: return 24; + case 0x4: return 32; + } + + DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value); + + return 16; +} + +/* + * current sampling rate in HZ + */ +static int r600_audio_rate(struct radeon_device *rdev) +{ + uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); + uint32_t result; + + if (value & 0x4000) + result = 44100; + else + result = 48000; + + result *= ((value >> 11) & 0x7) + 1; + result /= ((value >> 8) & 0x7) + 1; + + return result; +} + +/* + * iec 60958 status bits + */ +static uint8_t r600_audio_status_bits(struct radeon_device *rdev) +{ + return RREG32(R600_AUDIO_STATUS_BITS) & 0xff; +} + +/* + * iec 60958 category code + */ +static uint8_t r600_audio_category_code(struct radeon_device *rdev) +{ + return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff; +} + +/* + * update all hdmi interfaces with current audio parameters + */ +static void r600_audio_update_hdmi(unsigned long param) +{ + struct radeon_device *rdev = (struct radeon_device *)param; + struct drm_device *dev = rdev->ddev; + + int channels = r600_audio_channels(rdev); + int rate = r600_audio_rate(rdev); + int bps = r600_audio_bits_per_sample(rdev); + uint8_t status_bits = r600_audio_status_bits(rdev); + uint8_t category_code = r600_audio_category_code(rdev); + + struct drm_encoder *encoder; + int changes = 0; + + changes |= channels != rdev->audio_channels; + changes |= rate != rdev->audio_rate; + changes |= bps != rdev->audio_bits_per_sample; + changes |= status_bits != rdev->audio_status_bits; + changes |= category_code != rdev->audio_category_code; + + if (changes) { + rdev->audio_channels = channels; + rdev->audio_rate = rate; + rdev->audio_bits_per_sample = bps; + rdev->audio_status_bits = status_bits; + rdev->audio_category_code = category_code; + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (changes || r600_hdmi_buffer_status_changed(encoder)) + r600_hdmi_update_audio_settings( + encoder, channels, + rate, bps, status_bits, + category_code); + } + + mod_timer(&rdev->audio_timer, + jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL)); +} + +/* + * initialize the audio vars and register the update timer + */ +int r600_audio_init(struct radeon_device *rdev) +{ + if (!r600_audio_chipset_supported(rdev)) + return 0; + + DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling"); + WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 
0x81000000 : 0x0, ~0x81000000);
+
+	rdev->audio_channels = -1;
+	rdev->audio_rate = -1;
+	rdev->audio_bits_per_sample = -1;
+	rdev->audio_status_bits = 0;
+	rdev->audio_category_code = 0;
+
+	setup_timer(
+		&rdev->audio_timer,
+		r600_audio_update_hdmi,
+		(unsigned long)rdev);
+
+	mod_timer(&rdev->audio_timer, jiffies + 1);
+
+	return 0;
+}
+
+/*
+ * determine how the encoders and the audio interface are wired together
+ */
+int r600_audio_tmds_index(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *other;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		return 0;
+
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		/* special case: check if a TMDS1 is present */
+		list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
+			if (to_radeon_encoder(other)->encoder_id ==
+			    ENCODER_OBJECT_ID_INTERNAL_TMDS1)
+				return 1;
+		}
+		return 0;
+
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		return 1;
+
+	default:
+		DRM_ERROR("Unsupported encoder type 0x%02X\n",
+			  radeon_encoder->encoder_id);
+		return -1;
+	}
+}
+
+/*
+ * attach the audio codec to the clock source of the encoder
+ */
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	int base_rate = 48000;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
+		break;
+
+	default:
+		DRM_ERROR("Unsupported encoder type 0x%02X\n",
+			  radeon_encoder->encoder_id);
+		return;
+	}
+
+	switch (r600_audio_tmds_index(encoder)) {
+	case 0:
+		WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
+		WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+		WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+		break;
+
+	case 1:
+		WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
+		WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+		WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+		break;
+	}
+}
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
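+ * (presumably del_timer_sync(), so the update handler cannot still be
+ * running on another CPU while the device is being torn down)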
+ */ +void r600_audio_fini(struct radeon_device *rdev) +{ + if (!r600_audio_chipset_supported(rdev)) + return; + + WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); + + del_timer(&rdev->audio_timer); +} diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index dbf716e1fbf..af1c3ca8a4c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev) u32 packet2s[16]; int num_packet2s = 0; + mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) @@ -473,9 +474,8 @@ int r600_blit_init(struct radeon_device *rdev) obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_object_create(rdev, NULL, obj_size, - true, RADEON_GEM_DOMAIN_VRAM, - false, &rdev->r600_blit.shader_obj); + r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); return r; @@ -485,12 +485,14 @@ int r600_blit_init(struct radeon_device *rdev) obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); - r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); if (r) { DRM_ERROR("failed to map blit object %d\n", r); return r; } - if (rdev->family >= CHIP_RV770) memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len * 4); @@ -500,19 +502,28 @@ int r600_blit_init(struct radeon_device *rdev) if (num_packet2s) memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); - - memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); - - radeon_object_kunmap(rdev->r600_blit.shader_obj); + radeon_bo_kunmap(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); return 0; } void r600_blit_fini(struct radeon_device *rdev) { - radeon_object_unpin(rdev->r600_blit.shader_obj); - radeon_object_unref(&rdev->r600_blit.shader_obj); + int r; + + if (rdev->r600_blit.shader_obj == NULL) + return; + /* If we can't reserve the bo, unref should be enough to destroy + * it when it becomes idle. 
+ */ + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (!r) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } + radeon_bo_unref(&rdev->r600_blit.shader_obj); } int r600_vb_ib_get(struct radeon_device *rdev) @@ -547,7 +558,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) int dwords_per_loop = 76, num_loops; r = r600_vb_ib_get(rdev); - WARN_ON(r); + if (r) + return r; /* set_render_target emits 2 extra dwords on rv6xx */ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) @@ -569,11 +581,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) ring_size = num_loops * dwords_per_loop; /* set default + shaders */ ring_size += 40; /* shaders + def state */ - ring_size += 3; /* fence emit for VB IB */ + ring_size += 7; /* fence emit for VB IB */ ring_size += 5; /* done copy */ - ring_size += 3; /* fence emit for done copy */ + ring_size += 7; /* fence emit for done copy */ r = radeon_ring_lock(rdev, ring_size); - WARN_ON(r); + if (r) + return r; set_default_state(rdev); /* 14 */ set_shaders(rdev); /* 26 */ diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0d820764f34..e4c45ec1650 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; +struct r600_cs_track { + u32 cb_color0_base_last; +}; + /** * r600_cs_packet_parse() - parse cp packet and point ib index to next packet * @parser: parser structure holding parsing context. @@ -170,13 +174,35 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, idx, relocs_chunk->length_dw); return -EINVAL; } - *cs_reloc = &p->relocs[0]; + *cs_reloc = p->relocs; (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; return 0; } /** + * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc + * @parser: parser structure holding parsing context. + * + * Check next packet is relocation packet3, do bo validation and compute + * GPU offset using the provided start. + **/ +static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) +{ + struct radeon_cs_packet p3reloc; + int r; + + r = r600_cs_packet_parse(p, &p3reloc, p->idx); + if (r) { + return 0; + } + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { + return 0; + } + return 1; +} + +/** * r600_cs_packet_next_vline() - parse userspace VLINE packet * @parser: parser structure holding parsing context. * @@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { struct radeon_cs_reloc *reloc; + struct r600_cs_track *track; volatile u32 *ib; unsigned idx; unsigned i; @@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, int r; u32 idx_value; + track = (struct r600_cs_track *)p->track; ib = p->ib->ptr; idx = pkt->idx + 1; idx_value = radeon_get_ib_value(p, idx); @@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p, for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); switch (reg) { + /* This register were added late, there is userspace + * which does provide relocation for those but set + * 0 offset. 
In order to avoid breaking old userspace + * we detect this and set address to point to last + * CB_COLOR0_BASE, note that if userspace doesn't set + * CB_COLOR0_BASE before this register we will report + * error. Old userspace always set CB_COLOR0_BASE + * before any of this. + */ + case R_0280E0_CB_COLOR0_FRAG: + case R_0280E4_CB_COLOR1_FRAG: + case R_0280E8_CB_COLOR2_FRAG: + case R_0280EC_CB_COLOR3_FRAG: + case R_0280F0_CB_COLOR4_FRAG: + case R_0280F4_CB_COLOR5_FRAG: + case R_0280F8_CB_COLOR6_FRAG: + case R_0280FC_CB_COLOR7_FRAG: + case R_0280C0_CB_COLOR0_TILE: + case R_0280C4_CB_COLOR1_TILE: + case R_0280C8_CB_COLOR2_TILE: + case R_0280CC_CB_COLOR3_TILE: + case R_0280D0_CB_COLOR4_TILE: + case R_0280D4_CB_COLOR5_TILE: + case R_0280D8_CB_COLOR6_TILE: + case R_0280DC_CB_COLOR7_TILE: + if (!r600_cs_packet_next_is_pkt3_nop(p)) { + if (!track->cb_color0_base_last) { + dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); + return -EINVAL; + } + ib[idx+1+i] = track->cb_color0_base_last; + printk_once(KERN_WARNING "radeon: You have old & broken userspace " + "please consider updating mesa & xf86-video-ati\n"); + } else { + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + break; case DB_DEPTH_BASE: case DB_HTILE_DATA_BASE: case CB_COLOR0_BASE: + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color0_base_last = ib[idx+1+i]; + break; case CB_COLOR1_BASE: case CB_COLOR2_BASE: case CB_COLOR3_BASE: @@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, int r600_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; + struct r600_cs_track *track; int r; + track = kzalloc(sizeof(*track), GFP_KERNEL); + p->track = track; do { r = r600_cs_packet_parse(p, &pkt, p->idx); if (r) { @@ -717,7 +799,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) if (p->chunk_relocs_idx == -1) { return 0; } - p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL); + p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (p->relocs == NULL) { return -ENOMEM; } @@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; + parser.dev = &dev->pdev->dev; parser.rdev = NULL; parser.family = family; parser.ib = &fake_ib; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c new file mode 100644 index 00000000000..fcc949df0e5 --- /dev/null +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -0,0 +1,506 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Christian König. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon.h" +#include "atom.h" + +/* + * HDMI color format + */ +enum r600_hdmi_color_format { + RGB = 0, + YCC_422 = 1, + YCC_444 = 2 +}; + +/* + * IEC60958 status bits + */ +enum r600_hdmi_iec_status_bits { + AUDIO_STATUS_DIG_ENABLE = 0x01, + AUDIO_STATUS_V = 0x02, + AUDIO_STATUS_VCFG = 0x04, + AUDIO_STATUS_EMPHASIS = 0x08, + AUDIO_STATUS_COPYRIGHT = 0x10, + AUDIO_STATUS_NONAUDIO = 0x20, + AUDIO_STATUS_PROFESSIONAL = 0x40, + AUDIO_STATUS_LEVEL = 0x80 +}; + +struct { + uint32_t Clock; + + int N_32kHz; + int CTS_32kHz; + + int N_44_1kHz; + int CTS_44_1kHz; + + int N_48kHz; + int CTS_48kHz; + +} r600_hdmi_ACR[] = { + /* 32kHz 44.1kHz 48kHz */ + /* Clock N CTS N CTS N CTS */ + { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ + { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ + { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ + { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ + { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ + { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ + { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ + { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ + { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ + { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ + { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */ +}; + +/* + * calculate CTS value if it's not found in the table + */ +static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) +{ + if (*CTS == 0) + *CTS = clock*N/(128*freq)*1000; + DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", + N, *CTS, freq); +} + +/* + * update the N and CTS parameters for a given pixel clock rate + */ +static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + int CTS; + int N; + int i; + + for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++); + + CTS = r600_hdmi_ACR[i].CTS_32kHz; + N = r600_hdmi_ACR[i].N_32kHz; + r600_hdmi_calc_CTS(clock, &CTS, N, 32000); + WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12); + WREG32(offset+R600_HDMI_32kHz_N, N); 
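+
+	/*
+	 * Worked example (for illustration): the ACR values relate the TMDS
+	 * clock to the audio sample rate as
+	 *   CTS = f_TMDS * N / (128 * fs)
+	 * (f_TMDS in Hz, fs the audio sample rate), so for the 74.25 MHz
+	 * table entry at fs = 48 kHz with N = 6144:
+	 *   CTS = 74250000 * 6144 / (128 * 48000) = 74250
+	 * which matches r600_hdmi_ACR[] above. r600_hdmi_calc_CTS() applies
+	 * the same formula to clocks not in the table; since clock is given
+	 * in kHz there, the integer division happens before the final *1000
+	 * and the computed CTS is truncated to a multiple of 1000.
+	 */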
+ + CTS = r600_hdmi_ACR[i].CTS_44_1kHz; + N = r600_hdmi_ACR[i].N_44_1kHz; + r600_hdmi_calc_CTS(clock, &CTS, N, 44100); + WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12); + WREG32(offset+R600_HDMI_44_1kHz_N, N); + + CTS = r600_hdmi_ACR[i].CTS_48kHz; + N = r600_hdmi_ACR[i].N_48kHz; + r600_hdmi_calc_CTS(clock, &CTS, N, 48000); + WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12); + WREG32(offset+R600_HDMI_48kHz_N, N); +} + +/* + * calculate the crc for a given info frame + */ +static void r600_hdmi_infoframe_checksum(uint8_t packetType, + uint8_t versionNumber, + uint8_t length, + uint8_t *frame) +{ + int i; + frame[0] = packetType + versionNumber + length; + for (i = 1; i <= length; i++) + frame[0] += frame[i]; + frame[0] = 0x100 - frame[0]; +} + +/* + * build a HDMI Video Info Frame + */ +static void r600_hdmi_videoinfoframe( + struct drm_encoder *encoder, + enum r600_hdmi_color_format color_format, + int active_information_present, + uint8_t active_format_aspect_ratio, + uint8_t scan_information, + uint8_t colorimetry, + uint8_t ex_colorimetry, + uint8_t quantization, + int ITC, + uint8_t picture_aspect_ratio, + uint8_t video_format_identification, + uint8_t pixel_repetition, + uint8_t non_uniform_picture_scaling, + uint8_t bar_info_data_valid, + uint16_t top_bar, + uint16_t bottom_bar, + uint16_t left_bar, + uint16_t right_bar +) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + + uint8_t frame[14]; + + frame[0x0] = 0; + frame[0x1] = + (scan_information & 0x3) | + ((bar_info_data_valid & 0x3) << 2) | + ((active_information_present & 0x1) << 4) | + ((color_format & 0x3) << 5); + frame[0x2] = + (active_format_aspect_ratio & 0xF) | + ((picture_aspect_ratio & 0x3) << 4) | + ((colorimetry & 0x3) << 6); + frame[0x3] = + (non_uniform_picture_scaling & 0x3) | + ((quantization & 0x3) << 2) | + ((ex_colorimetry & 0x7) << 4) | + ((ITC & 0x1) << 7); + frame[0x4] = (video_format_identification & 0x7F); + frame[0x5] = (pixel_repetition & 0xF); + frame[0x6] = (top_bar & 0xFF); + frame[0x7] = (top_bar >> 8); + frame[0x8] = (bottom_bar & 0xFF); + frame[0x9] = (bottom_bar >> 8); + frame[0xA] = (left_bar & 0xFF); + frame[0xB] = (left_bar >> 8); + frame[0xC] = (right_bar & 0xFF); + frame[0xD] = (right_bar >> 8); + + r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame); + + WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, + frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); + WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1, + frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); + WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2, + frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); + WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3, + frame[0xC] | (frame[0xD] << 8)); +} + +/* + * build a Audio Info Frame + */ +static void r600_hdmi_audioinfoframe( + struct drm_encoder *encoder, + uint8_t channel_count, + uint8_t coding_type, + uint8_t sample_size, + uint8_t sample_frequency, + uint8_t format, + uint8_t channel_allocation, + uint8_t level_shift, + int downmix_inhibit +) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + + uint8_t frame[11]; + + frame[0x0] = 0; + frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4); + frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2); + frame[0x3] = format; + frame[0x4] = channel_allocation; + frame[0x5] = 
		((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+	frame[0x6] = 0;
+	frame[0x7] = 0;
+	frame[0x8] = 0;
+	frame[0x9] = 0;
+	frame[0xA] = 0;
+
+	r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+
+	WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+}
+
+/*
+ * test if the audio buffer is filled enough to start playing
+ */
+static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+	return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+}
+
+/*
+ * has the buffer status changed since the last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	int status, result;
+
+	if (!radeon_encoder->hdmi_offset)
+		return 0;
+
+	status = r600_hdmi_is_audio_buffer_filled(encoder);
+	result = radeon_encoder->hdmi_buffer_status != status;
+	radeon_encoder->hdmi_buffer_status = status;
+
+	return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t offset = radeon_encoder->hdmi_offset;
+
+	if (!offset)
+		return;
+
+	if (r600_hdmi_is_audio_buffer_filled(encoder)) {
+		/* disable the audio workaround and start delivering audio frames */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+
+	} else if (radeon_encoder->hdmi_audio_workaround) {
+		/* enable the audio workaround and start delivering audio frames */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+
+	} else {
+		/* disable the audio workaround and stop delivering audio frames */
+		WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+	}
+}
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+	if (!offset)
+		return;
+
+	r600_audio_set_clock(encoder, mode->clock);
+
+	WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
+	WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
+	WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+
+	r600_hdmi_update_ACR(encoder, mode->clock);
+
+	WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+
+	WREG32(offset+R600_HDMI_VERSION, 0x202);
+
+	r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+	/* it's unknown what these bits do exactly, but they're quite useful for debugging */
+	WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
+	WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
+	WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
+	WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+
+	r600_hdmi_audio_workaround(encoder);
+
+	/* audio packets per line, does anyone know how to calculate this? */
+	WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
+
+	/* update? reset? don't really know */
+	WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
+}
+
+/*
+ * update settings with current parameters from the audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+				     int channels,
+				     int rate,
+				     int bps,
+				     uint8_t status_bits,
+				     uint8_t category_code)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+	uint32_t iec;
+
+	if (!offset)
+		return;
+
+	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+		  r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+		  channels, rate, bps);
+	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+		  (int)status_bits, (int)category_code);
+
+	iec = 0;
+	if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+		iec |= 1 << 0;
+	if (status_bits & AUDIO_STATUS_NONAUDIO)
+		iec |= 1 << 1;
+	if (status_bits & AUDIO_STATUS_COPYRIGHT)
+		iec |= 1 << 2;
+	if (status_bits & AUDIO_STATUS_EMPHASIS)
+		iec |= 1 << 3;
+
+	iec |= category_code << 8;
+
+	switch (rate) {
+	case 32000: iec |= 0x3 << 24; break;
+	case 44100: iec |= 0x0 << 24; break;
+	case 88200: iec |= 0x8 << 24; break;
+	case 176400: iec |= 0xc << 24; break;
+	case 48000: iec |= 0x2 << 24; break;
+	case 96000: iec |= 0xa << 24; break;
+	case 192000: iec |= 0xe << 24; break;
+	}
+
+	WREG32(offset+R600_HDMI_IEC60958_1, iec);
+
+	iec = 0;
+	switch (bps) {
+	case 16: iec |= 0x2; break;
+	case 20: iec |= 0x3; break;
+	case 24: iec |= 0xb; break;
+	}
+	if (status_bits & AUDIO_STATUS_V)
+		iec |= 0x5 << 16;
+
+	WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
+
+	/* 0x021 or 0x031 sets the audio frame length */
+	WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
+	r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+
+	r600_hdmi_audio_workaround(encoder);
+
+	/* update? reset? don't really know */
+	WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
+}
+
+/*
+ * enable/disable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+	if (!offset)
+		return;
+
+	DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
+
+	/* some versions of atombios ignore the enable HDMI flag,
+	 * so enabling/disabling HDMI was moved here for TMDS1+2 */
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* This part is doubtful in my opinion */
+		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
+		break;
+
+	default:
+		DRM_ERROR("unknown HDMI output type\n");
+		break;
+	}
+}
+
+/*
+ * determine at which register offset the HDMI encoder is located
+ */
+void r600_hdmi_init(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		switch (r600_audio_tmds_index(encoder)) {
+		case 0:
+			radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+			break;
+		case 1:
+			radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+			break;
+		default:
+			radeon_encoder->hdmi_offset = 0;
+			break;
+		}
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+		break;
+
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		radeon_encoder->hdmi_offset = R600_HDMI_DIG;
+		break;
+
+	default:
+		radeon_encoder->hdmi_offset = 0;
+		break;
+	}
+
+	DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
+		  radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
+	/* TODO: make this configurable */
+	radeon_encoder->hdmi_audio_workaround = 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f33f7..d0e28ffdeda 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
 #define R600_BIOS_6_SCRATCH               0x173c
 #define R600_BIOS_7_SCRATCH               0x1740
 
+/* Audio, these regs were reverse engineered,
+ * so the chance is high that the naming is wrong
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL               0x0514
+#define R600_AUDIO_PLL1_DIV               0x0518
+#define R600_AUDIO_PLL2_MUL               0x0524
+#define R600_AUDIO_PLL2_DIV               0x0528
+#define R600_AUDIO_CLK_SRCSEL             0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE                 0x7300
+#define R600_AUDIO_TIMING                 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID              0x7380
+#define R600_AUDIO_REVISION_ID            0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT        0x7388
+#define R600_AUDIO_NID1_NODE_COUNT        0x738c
+#define R600_AUDIO_NID1_TYPE              0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE    0x7394
+#define R600_AUDIO_SUPPORTED_CODEC        0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS              0x73a0
+#define R600_AUDIO_NID3_CAPS              0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS          0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN          0x73ac
+#define R600_AUDIO_CONN_LIST              0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL       0x73c0
+#define R600_AUDIO_PLAYING                0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID      0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT         0x73cc
+#define R600_AUDIO_PIN_SENSE              0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL        0x73d4
+#define R600_AUDIO_STATUS_BITS            0x73d8
+
+/* HDMI base register addresses */
+#define R600_HDMI_TMDS1                   0x7400
+#define R600_HDMI_TMDS2                   0x7700
+#define R600_HDMI_DIG                     0x7800
+
+/* HDMI registers */
+#define R600_HDMI_ENABLE           0x00
+#define R600_HDMI_STATUS           0x04
+#define R600_HDMI_CNTL             0x08
+#define R600_HDMI_UNKNOWN_0        0x0C
+#define R600_HDMI_AUDIOCNTL        0x10
+#define R600_HDMI_VIDEOCNTL        0x14
+#define R600_HDMI_VERSION          0x18
+#define R600_HDMI_UNKNOWN_1        0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS        0xac
+#define R600_HDMI_32kHz_N          0xb0
+#define R600_HDMI_44_1kHz_CTS
0xb4 +#define R600_HDMI_44_1kHz_N 0xb8 +#define R600_HDMI_48kHz_CTS 0xbc +#define R600_HDMI_48kHz_N 0xc0 +#define R600_HDMI_AUDIOINFOFRAME_0 0xcc +#define R600_HDMI_AUDIOINFOFRAME_1 0xd0 +#define R600_HDMI_IEC60958_1 0xd4 +#define R600_HDMI_IEC60958_2 0xd8 +#define R600_HDMI_UNKNOWN_2 0xdc +#define R600_HDMI_AUDIO_DEBUG_0 0xe0 +#define R600_HDMI_AUDIO_DEBUG_1 0xe4 +#define R600_HDMI_AUDIO_DEBUG_2 0xe8 +#define R600_HDMI_AUDIO_DEBUG_3 0xec #endif diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 27ab428b149..30480881aed 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -456,7 +456,215 @@ #define WAIT_2D_IDLECLEAN_bit (1 << 16) #define WAIT_3D_IDLECLEAN_bit (1 << 17) - +#define IH_RB_CNTL 0x3e00 +# define IH_RB_ENABLE (1 << 0) +# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ +# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) +# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) +# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ +# define IH_WPTR_OVERFLOW_ENABLE (1 << 16) +# define IH_WPTR_OVERFLOW_CLEAR (1 << 31) +#define IH_RB_BASE 0x3e04 +#define IH_RB_RPTR 0x3e08 +#define IH_RB_WPTR 0x3e0c +# define RB_OVERFLOW (1 << 0) +# define WPTR_OFFSET_MASK 0x3fffc +#define IH_RB_WPTR_ADDR_HI 0x3e10 +#define IH_RB_WPTR_ADDR_LO 0x3e14 +#define IH_CNTL 0x3e18 +# define ENABLE_INTR (1 << 0) +# define IH_MC_SWAP(x) ((x) << 2) +# define IH_MC_SWAP_NONE 0 +# define IH_MC_SWAP_16BIT 1 +# define IH_MC_SWAP_32BIT 2 +# define IH_MC_SWAP_64BIT 3 +# define RPTR_REARM (1 << 4) +# define MC_WRREQ_CREDIT(x) ((x) << 15) +# define MC_WR_CLEAN_CNT(x) ((x) << 20) + +#define RLC_CNTL 0x3f00 +# define RLC_ENABLE (1 << 0) +#define RLC_HB_BASE 0x3f10 +#define RLC_HB_CNTL 0x3f0c +#define RLC_HB_RPTR 0x3f20 +#define RLC_HB_WPTR 0x3f1c +#define RLC_HB_WPTR_LSB_ADDR 0x3f14 +#define RLC_HB_WPTR_MSB_ADDR 0x3f18 +#define RLC_MC_CNTL 0x3f44 +#define RLC_UCODE_CNTL 0x3f48 +#define RLC_UCODE_ADDR 0x3f2c +#define RLC_UCODE_DATA 0x3f30 + +#define SRBM_SOFT_RESET 0xe60 +# define SOFT_RESET_RLC (1 << 13) + +#define CP_INT_CNTL 0xc124 +# define CNTX_BUSY_INT_ENABLE (1 << 19) +# define CNTX_EMPTY_INT_ENABLE (1 << 20) +# define SCRATCH_INT_ENABLE (1 << 25) +# define TIME_STAMP_INT_ENABLE (1 << 26) +# define IB2_INT_ENABLE (1 << 29) +# define IB1_INT_ENABLE (1 << 30) +# define RB_INT_ENABLE (1 << 31) +#define CP_INT_STATUS 0xc128 +# define SCRATCH_INT_STAT (1 << 25) +# define TIME_STAMP_INT_STAT (1 << 26) +# define IB2_INT_STAT (1 << 29) +# define IB1_INT_STAT (1 << 30) +# define RB_INT_STAT (1 << 31) + +#define GRBM_INT_CNTL 0x8060 +# define RDERR_INT_ENABLE (1 << 0) +# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1) +# define GUI_IDLE_INT_ENABLE (1 << 19) + +#define INTERRUPT_CNTL 0x5468 +# define IH_DUMMY_RD_OVERRIDE (1 << 0) +# define IH_DUMMY_RD_EN (1 << 1) +# define IH_REQ_NONSNOOP_EN (1 << 3) +# define GEN_IH_INT_EN (1 << 8) +#define INTERRUPT_CNTL2 0x546c + +#define D1MODE_VBLANK_STATUS 0x6534 +#define D2MODE_VBLANK_STATUS 0x6d34 +# define DxMODE_VBLANK_OCCURRED (1 << 0) +# define DxMODE_VBLANK_ACK (1 << 4) +# define DxMODE_VBLANK_STAT (1 << 12) +# define DxMODE_VBLANK_INTERRUPT (1 << 16) +# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17) +#define D1MODE_VLINE_STATUS 0x653c +#define D2MODE_VLINE_STATUS 0x6d3c +# define DxMODE_VLINE_OCCURRED (1 << 0) +# define DxMODE_VLINE_ACK (1 << 4) +# define DxMODE_VLINE_STAT (1 << 12) +# define DxMODE_VLINE_INTERRUPT (1 << 16) +# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17) +#define DxMODE_INT_MASK 0x6540 +# define D1MODE_VBLANK_INT_MASK (1 << 
0) +# define D1MODE_VLINE_INT_MASK (1 << 4) +# define D2MODE_VBLANK_INT_MASK (1 << 8) +# define D2MODE_VLINE_INT_MASK (1 << 12) +#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc +# define DC_HPD1_INTERRUPT (1 << 18) +# define DC_HPD2_INTERRUPT (1 << 19) +#define DISP_INTERRUPT_STATUS 0x7edc +# define LB_D1_VLINE_INTERRUPT (1 << 2) +# define LB_D2_VLINE_INTERRUPT (1 << 3) +# define LB_D1_VBLANK_INTERRUPT (1 << 4) +# define LB_D2_VBLANK_INTERRUPT (1 << 5) +# define DACA_AUTODETECT_INTERRUPT (1 << 16) +# define DACB_AUTODETECT_INTERRUPT (1 << 17) +# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18) +# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19) +# define DC_I2C_SW_DONE_INTERRUPT (1 << 20) +# define DC_I2C_HW_DONE_INTERRUPT (1 << 21) +#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8 +#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8 +# define DC_HPD4_INTERRUPT (1 << 14) +# define DC_HPD4_RX_INTERRUPT (1 << 15) +# define DC_HPD3_INTERRUPT (1 << 28) +# define DC_HPD1_RX_INTERRUPT (1 << 29) +# define DC_HPD2_RX_INTERRUPT (1 << 30) +#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec +# define DC_HPD3_RX_INTERRUPT (1 << 0) +# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1) +# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2) +# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3) +# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4) +# define AUX1_SW_DONE_INTERRUPT (1 << 5) +# define AUX1_LS_DONE_INTERRUPT (1 << 6) +# define AUX2_SW_DONE_INTERRUPT (1 << 7) +# define AUX2_LS_DONE_INTERRUPT (1 << 8) +# define AUX3_SW_DONE_INTERRUPT (1 << 9) +# define AUX3_LS_DONE_INTERRUPT (1 << 10) +# define AUX4_SW_DONE_INTERRUPT (1 << 11) +# define AUX4_LS_DONE_INTERRUPT (1 << 12) +# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13) +# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14) +/* DCE 3.2 */ +# define AUX5_SW_DONE_INTERRUPT (1 << 15) +# define AUX5_LS_DONE_INTERRUPT (1 << 16) +# define AUX6_SW_DONE_INTERRUPT (1 << 17) +# define AUX6_LS_DONE_INTERRUPT (1 << 18) +# define DC_HPD5_INTERRUPT (1 << 19) +# define DC_HPD5_RX_INTERRUPT (1 << 20) +# define DC_HPD6_INTERRUPT (1 << 21) +# define DC_HPD6_RX_INTERRUPT (1 << 22) + +#define DACA_AUTO_DETECT_CONTROL 0x7828 +#define DACB_AUTO_DETECT_CONTROL 0x7a28 +#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028 +#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128 +# define DACx_AUTODETECT_MODE(x) ((x) << 0) +# define DACx_AUTODETECT_MODE_NONE 0 +# define DACx_AUTODETECT_MODE_CONNECT 1 +# define DACx_AUTODETECT_MODE_DISCONNECT 2 +# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8) +/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */ +# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16) + +#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038 +#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138 +#define DACA_AUTODETECT_INT_CONTROL 0x7838 +#define DACB_AUTODETECT_INT_CONTROL 0x7a38 +# define DACx_AUTODETECT_ACK (1 << 0) +# define DACx_AUTODETECT_INT_ENABLE (1 << 16) + +#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00 +#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10 +#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24 +# define DC_HOT_PLUG_DETECTx_EN (1 << 0) + +#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04 +#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14 +#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28 +# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0) +# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1) + +/* DCE 3.0 */ +#define DC_HPD1_INT_STATUS 0x7d00 +#define DC_HPD2_INT_STATUS 0x7d0c +#define DC_HPD3_INT_STATUS 0x7d18 +#define DC_HPD4_INT_STATUS 0x7d24 +/* DCE 3.2 */ +#define DC_HPD5_INT_STATUS 0x7dc0 
+#define DC_HPD6_INT_STATUS 0x7df4 +# define DC_HPDx_INT_STATUS (1 << 0) +# define DC_HPDx_SENSE (1 << 1) +# define DC_HPDx_RX_INT_STATUS (1 << 8) + +#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08 +#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18 +#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c +# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0) +# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8) +# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16) +/* DCE 3.0 */ +#define DC_HPD1_INT_CONTROL 0x7d04 +#define DC_HPD2_INT_CONTROL 0x7d10 +#define DC_HPD3_INT_CONTROL 0x7d1c +#define DC_HPD4_INT_CONTROL 0x7d28 +/* DCE 3.2 */ +#define DC_HPD5_INT_CONTROL 0x7dc4 +#define DC_HPD6_INT_CONTROL 0x7df8 +# define DC_HPDx_INT_ACK (1 << 0) +# define DC_HPDx_INT_POLARITY (1 << 8) +# define DC_HPDx_INT_EN (1 << 16) +# define DC_HPDx_RX_INT_ACK (1 << 20) +# define DC_HPDx_RX_INT_EN (1 << 24) + +/* DCE 3.0 */ +#define DC_HPD1_CONTROL 0x7d08 +#define DC_HPD2_CONTROL 0x7d14 +#define DC_HPD3_CONTROL 0x7d20 +#define DC_HPD4_CONTROL 0x7d2c +/* DCE 3.2 */ +#define DC_HPD5_CONTROL 0x7dc8 +#define DC_HPD6_CONTROL 0x7dfc +# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) +# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) +/* DCE 3.2 */ +# define DC_HPDx_EN (1 << 28) /* * PM4 @@ -500,7 +708,6 @@ #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D #define PACKET3_INDIRECT_BUFFER 0x32 -#define PACKET3_CP_INTERRUPT 0x40 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_TC_ACTION_ENA (1 << 23) @@ -674,4 +881,30 @@ #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) +#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 + +#define R_0280E0_CB_COLOR0_FRAG 0x0280E0 +#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) +#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_0280E0_BASE_256B 0x00000000 +#define R_0280E4_CB_COLOR1_FRAG 0x0280E4 +#define R_0280E8_CB_COLOR2_FRAG 0x0280E8 +#define R_0280EC_CB_COLOR3_FRAG 0x0280EC +#define R_0280F0_CB_COLOR4_FRAG 0x0280F0 +#define R_0280F4_CB_COLOR5_FRAG 0x0280F4 +#define R_0280F8_CB_COLOR6_FRAG 0x0280F8 +#define R_0280FC_CB_COLOR7_FRAG 0x0280FC +#define R_0280C0_CB_COLOR0_TILE 0x0280C0 +#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) +#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_0280C0_BASE_256B 0x00000000 +#define R_0280C4_CB_COLOR1_TILE 0x0280C4 +#define R_0280C8_CB_COLOR2_TILE 0x0280C8 +#define R_0280CC_CB_COLOR3_TILE 0x0280CC +#define R_0280D0_CB_COLOR4_TILE 0x0280D0 +#define R_0280D4_CB_COLOR5_TILE 0x0280D4 +#define R_0280D8_CB_COLOR6_TILE 0x0280D8 +#define R_0280DC_CB_COLOR7_TILE 0x0280DC + + #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 224506a2f7b..2d5f2bfa720 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -28,8 +28,6 @@ #ifndef __RADEON_H__ #define __RADEON_H__ -#include "radeon_object.h" - /* TODO: Here are things that needs to be done : * - surface allocator & initializer : (bit like scratch reg) should * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings @@ -67,6 +65,11 @@ #include <linux/list.h> #include <linux/kref.h> +#include <ttm/ttm_bo_api.h> +#include <ttm/ttm_bo_driver.h> +#include <ttm/ttm_placement.h> +#include <ttm/ttm_module.h> + #include "radeon_family.h" #include "radeon_mode.h" #include "radeon_reg.h" @@ -85,6 +88,8 @@ extern int radeon_benchmarking; extern int radeon_testing; extern int radeon_connector_table; extern int 
radeon_tv; +extern int radeon_new_pll; +extern int radeon_audio; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -157,6 +162,7 @@ struct radeon_fence_driver { struct list_head created; struct list_head emited; struct list_head signaled; + bool initialized; }; struct radeon_fence { @@ -186,76 +192,63 @@ void radeon_fence_unref(struct radeon_fence **fence); * Tiling registers */ struct radeon_surface_reg { - struct radeon_object *robj; + struct radeon_bo *bo; }; #define RADEON_GEM_MAX_SURFACES 8 /* - * Radeon buffer. + * TTM. */ -struct radeon_object; +struct radeon_mman { + struct ttm_bo_global_ref bo_global_ref; + struct ttm_global_reference mem_global_ref; + struct ttm_bo_device bdev; + bool mem_global_referenced; + bool initialized; +}; + +struct radeon_bo { + /* Protected by gem.mutex */ + struct list_head list; + /* Protected by tbo.reserved */ + u32 placements[3]; + struct ttm_placement placement; + struct ttm_buffer_object tbo; + struct ttm_bo_kmap_obj kmap; + unsigned pin_count; + void *kptr; + u32 tiling_flags; + u32 pitch; + int surface_reg; + /* Constant after initialization */ + struct radeon_device *rdev; + struct drm_gem_object *gobj; +}; -struct radeon_object_list { +struct radeon_bo_list { struct list_head list; - struct radeon_object *robj; + struct radeon_bo *bo; uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; - uint32_t tiling_flags; + u32 tiling_flags; }; -int radeon_object_init(struct radeon_device *rdev); -void radeon_object_fini(struct radeon_device *rdev); -int radeon_object_create(struct radeon_device *rdev, - struct drm_gem_object *gobj, - unsigned long size, - bool kernel, - uint32_t domain, - bool interruptible, - struct radeon_object **robj_ptr); -int radeon_object_kmap(struct radeon_object *robj, void **ptr); -void radeon_object_kunmap(struct radeon_object *robj); -void radeon_object_unref(struct radeon_object **robj); -int radeon_object_pin(struct radeon_object *robj, uint32_t domain, - uint64_t *gpu_addr); -void radeon_object_unpin(struct radeon_object *robj); -int radeon_object_wait(struct radeon_object *robj); -int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); -int radeon_object_evict_vram(struct radeon_device *rdev); -int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); -void radeon_object_force_delete(struct radeon_device *rdev); -void radeon_object_list_add_object(struct radeon_object_list *lobj, - struct list_head *head); -int radeon_object_list_validate(struct list_head *head, void *fence); -void radeon_object_list_unvalidate(struct list_head *head); -void radeon_object_list_clean(struct list_head *head); -int radeon_object_fbdev_mmap(struct radeon_object *robj, - struct vm_area_struct *vma); -unsigned long radeon_object_size(struct radeon_object *robj); -void radeon_object_clear_surface_reg(struct radeon_object *robj); -int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, - bool force_drop); -void radeon_object_set_tiling_flags(struct radeon_object *robj, - uint32_t tiling_flags, uint32_t pitch); -void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); -void radeon_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); /* * GEM objects. 
*/ struct radeon_gem { + struct mutex mutex; struct list_head objects; }; int radeon_gem_init(struct radeon_device *rdev); void radeon_gem_fini(struct radeon_device *rdev); int radeon_gem_object_create(struct radeon_device *rdev, int size, - int alignment, int initial_domain, - bool discardable, bool kernel, - bool interruptible, - struct drm_gem_object **obj); + int alignment, int initial_domain, + bool discardable, bool kernel, + struct drm_gem_object **obj); int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, uint64_t *gpu_addr); void radeon_gem_object_unpin(struct drm_gem_object *obj); @@ -271,7 +264,7 @@ struct radeon_gart_table_ram { }; struct radeon_gart_table_vram { - struct radeon_object *robj; + struct radeon_bo *robj; volatile uint32_t *ptr; }; @@ -326,10 +319,12 @@ struct radeon_mc { u64 real_vram_size; int vram_mtrr; bool vram_is_ddr; + bool igp_sideport_enabled; }; int radeon_mc_setup(struct radeon_device *rdev); - +bool radeon_combios_sideport_present(struct radeon_device *rdev); +bool radeon_atombios_sideport_present(struct radeon_device *rdev); /* * GPU scratch registers structures, functions & helpers @@ -352,11 +347,16 @@ struct radeon_irq { bool sw_int; /* FIXME: use a define max crtc rather than hardcode it */ bool crtc_vblank_int[2]; + /* FIXME: use defines for max hpd/dacs */ + bool hpd[6]; + spinlock_t sw_lock; + int sw_refcount; }; int radeon_irq_kms_init(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev); - +void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); +void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); /* * CP & ring. @@ -376,7 +376,7 @@ struct radeon_ib { */ struct radeon_ib_pool { struct mutex mutex; - struct radeon_object *robj; + struct radeon_bo *robj; struct list_head scheduled_ibs; struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; bool ready; @@ -384,7 +384,7 @@ struct radeon_ib_pool { }; struct radeon_cp { - struct radeon_object *ring_obj; + struct radeon_bo *ring_obj; volatile uint32_t *ring; unsigned rptr; unsigned wptr; @@ -399,8 +399,25 @@ struct radeon_cp { bool ready; }; +/* + * R6xx+ IH ring + */ +struct r600_ih { + struct radeon_bo *ring_obj; + volatile uint32_t *ring; + unsigned rptr; + unsigned wptr; + unsigned wptr_old; + unsigned ring_size; + uint64_t gpu_addr; + uint32_t ptr_mask; + spinlock_t lock; + bool enabled; +}; + struct r600_blit { - struct radeon_object *shader_obj; + struct mutex mutex; + struct radeon_bo *shader_obj; u64 shader_gpu_addr; u32 vs_offset, ps_offset; u32 state_offset; @@ -430,8 +447,8 @@ void radeon_ring_fini(struct radeon_device *rdev); */ struct radeon_cs_reloc { struct drm_gem_object *gobj; - struct radeon_object *robj; - struct radeon_object_list lobj; + struct radeon_bo *robj; + struct radeon_bo_list lobj; uint32_t handle; uint32_t flags; }; @@ -448,6 +465,7 @@ struct radeon_cs_chunk { }; struct radeon_cs_parser { + struct device *dev; struct radeon_device *rdev; struct drm_file *filp; /* chunks */ @@ -527,7 +545,7 @@ void radeon_agp_fini(struct radeon_device *rdev); * Writeback */ struct radeon_wb { - struct radeon_object *wb_obj; + struct radeon_bo *wb_obj; volatile uint32_t *wb; uint64_t gpu_addr; }; @@ -639,6 +657,10 @@ struct radeon_asic { uint32_t offset, uint32_t obj_size); int (*clear_surface_reg)(struct radeon_device *rdev, int reg); void (*bandwidth_update)(struct radeon_device *rdev); + void (*hpd_init)(struct radeon_device *rdev); + void (*hpd_fini)(struct radeon_device *rdev); + bool (*hpd_sense)(struct radeon_device *rdev, enum 
radeon_hpd_id hpd); + void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); }; /* @@ -647,11 +669,14 @@ struct radeon_asic { struct r100_asic { const unsigned *reg_safe_bm; unsigned reg_safe_bm_size; + u32 hdp_cntl; }; struct r300_asic { const unsigned *reg_safe_bm; unsigned reg_safe_bm_size; + u32 resync_scratch; + u32 hdp_cntl; }; struct r600_asic { @@ -751,9 +776,9 @@ struct radeon_device { uint8_t *bios; bool is_atom_bios; uint16_t bios_header_start; - struct radeon_object *stollen_vga_memory; + struct radeon_bo *stollen_vga_memory; struct fb_info *fbdev_info; - struct radeon_object *fbdev_robj; + struct radeon_bo *fbdev_rbo; struct radeon_framebuffer *fbdev_rfb; /* Register mmio */ resource_size_t rmmio_base; @@ -791,8 +816,20 @@ struct radeon_device { struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */ + const struct firmware *rlc_fw; /* r6/700 RLC firmware */ struct r600_blit r600_blit; int msi_enabled; /* msi enabled */ + struct r600_ih ih; /* r6/700 interrupt ring */ + struct workqueue_struct *wq; + struct work_struct hotplug_work; + + /* audio stuff */ + struct timer_list audio_timer; + int audio_channels; + int audio_rate; + int audio_bits_per_sample; + uint8_t audio_status_bits; + uint8_t audio_category_code; }; int radeon_device_init(struct radeon_device *rdev, @@ -811,7 +848,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) { - if (reg < 0x10000) + if (reg < rdev->rmmio_size) return readl(((void __iomem *)rdev->rmmio) + reg); else { writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); @@ -821,7 +858,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - if (reg < 0x10000) + if (reg < rdev->rmmio_size) writel(v, ((void __iomem *)rdev->rmmio) + reg); else { writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); @@ -829,6 +866,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 } } +/* + * Cast helper + */ +#define to_radeon_fence(p) ((struct radeon_fence *)(p)) /* * Registers read & write functions. 
@@ -965,18 +1006,25 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) -#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) +#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) +#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) +#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) +#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) +#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) /* Common functions */ +/* AGP */ +extern void radeon_agp_disable(struct radeon_device *rdev); extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev); +extern bool radeon_boot_test_post_card(struct radeon_device *rdev); extern int radeon_clocks_init(struct radeon_device *rdev); extern void radeon_clocks_fini(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); @@ -984,6 +1032,8 @@ extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); +extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); +extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ struct r100_mc_save { @@ -1021,7 +1071,7 @@ extern int r100_cp_reset(struct radeon_device *rdev); extern void r100_vga_render_disable(struct radeon_device *rdev); extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, - struct radeon_object *robj); + struct radeon_bo *robj); extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, const unsigned *auth, unsigned n, @@ -1029,6 +1079,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, extern int r100_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx); +extern void r100_enable_bm(struct radeon_device *rdev); +extern void r100_set_common_regs(struct radeon_device *rdev); /* rv200,rv250,rv280 */ extern void r200_set_safe_registers(struct radeon_device *rdev); @@ -1104,7 +1156,30 @@ extern void r600_wb_disable(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); -extern int r600_cp_init_microcode(struct radeon_device *rdev); +extern int 
r600_init_microcode(struct radeon_device *rdev); extern int r600_gpu_reset(struct radeon_device *rdev); +/* r600 irq */ +extern int r600_irq_init(struct radeon_device *rdev); +extern void r600_irq_fini(struct radeon_device *rdev); +extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); +extern int r600_irq_set(struct radeon_device *rdev); +extern void r600_irq_suspend(struct radeon_device *rdev); +/* r600 audio */ +extern int r600_audio_init(struct radeon_device *rdev); +extern int r600_audio_tmds_index(struct drm_encoder *encoder); +extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); +extern void r600_audio_fini(struct radeon_device *rdev); +extern void r600_hdmi_init(struct drm_encoder *encoder); +extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); +extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); +extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); +extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, + int channels, + int rate, + int bps, + uint8_t status_bits, + uint8_t category_code); + +#include "radeon_object.h" #endif diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 54bf49a6d67..c0681a5556d 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev) ret = drm_agp_info(rdev->ddev, &info); if (ret) { + drm_agp_release(rdev->ddev); DRM_ERROR("Unable to get AGP info: %d\n", ret); return ret; } + + if (rdev->ddev->agp->agp_info.aper_size < 32) { + drm_agp_release(rdev->ddev); + dev_warn(rdev->dev, "AGP aperture too small (%zuM) " + "need at least 32M, disabling AGP\n", + rdev->ddev->agp->agp_info.aper_size); + return -EINVAL; + } + mode.mode = info.mode; agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; is_v3 = !!(agp_status & RADEON_AGPv3_MODE); @@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev) ret = drm_agp_enable(rdev->ddev, mode); if (ret) { DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); + drm_agp_release(rdev->ddev); return ret; } @@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev) void radeon_agp_fini(struct radeon_device *rdev) { #if __OS_HAS_AGP - if (rdev->flags & RADEON_IS_AGP) { - if (rdev->ddev->agp && rdev->ddev->agp->acquired) { - drm_agp_release(rdev->ddev); - } + if (rdev->ddev->agp && rdev->ddev->agp->acquired) { + drm_agp_release(rdev->ddev); } #endif } diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c18fbee387d..f2fbd2e4e9d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -33,6 +33,7 @@ */ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); +uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev); void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); @@ -76,6 +77,11 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg); void r100_bandwidth_update(struct radeon_device *rdev); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r100_ring_test(struct radeon_device *rdev); +void r100_hpd_init(struct radeon_device *rdev); +void r100_hpd_fini(struct radeon_device *rdev); +bool 
r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); +void r100_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); static struct radeon_asic r100_asic = { .init = &r100_init, @@ -100,13 +106,17 @@ static struct radeon_asic r100_asic = { .copy = &r100_copy_blit, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = NULL, + .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, }; @@ -155,13 +165,17 @@ static struct radeon_asic r300_asic = { .copy = &r100_copy_blit, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = NULL, + .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, }; /* @@ -201,6 +215,10 @@ static struct radeon_asic r420_asic = { .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, }; @@ -238,13 +256,17 @@ static struct radeon_asic rs400_asic = { .copy = &r100_copy_blit, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, - .get_memory_clock = NULL, + .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, }; @@ -263,6 +285,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); +void rs600_hpd_init(struct radeon_device *rdev); +void rs600_hpd_fini(struct radeon_device *rdev); +bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); +void rs600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); + static struct radeon_asic rs600_asic = { .init = &rs600_init, .fini = &rs600_fini, @@ -291,6 +319,10 @@ static struct radeon_asic rs600_asic = { .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, .bandwidth_update = &rs600_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, }; @@ -334,6 +366,10 @@ static struct radeon_asic rs690_asic = { 
.set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &rs690_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, }; @@ -381,6 +417,10 @@ static struct radeon_asic rv515_asic = { .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, }; @@ -419,6 +459,10 @@ static struct radeon_asic r520_asic = { .set_surface_reg = r100_set_surface_reg, .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &rs600_hpd_init, + .hpd_fini = &rs600_hpd_fini, + .hpd_sense = &rs600_hpd_sense, + .hpd_set_polarity = &rs600_hpd_set_polarity, }; /* @@ -455,6 +499,11 @@ int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); +void r600_hpd_init(struct radeon_device *rdev); +void r600_hpd_fini(struct radeon_device *rdev); +bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); +void r600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); static struct radeon_asic r600_asic = { .init = &r600_init, @@ -470,6 +519,7 @@ static struct radeon_asic r600_asic = { .ring_ib_execute = &r600_ring_ib_execute, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, @@ -484,6 +534,10 @@ static struct radeon_asic r600_asic = { .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &r600_hpd_init, + .hpd_fini = &r600_hpd_fini, + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, }; /* @@ -509,6 +563,7 @@ static struct radeon_asic rv770_asic = { .ring_ib_execute = &r600_ring_ib_execute, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, + .get_vblank_counter = &rs600_get_vblank_counter, .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, @@ -523,6 +578,10 @@ static struct radeon_asic rv770_asic = { .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, .bandwidth_update = &rv515_bandwidth_update, + .hpd_init = &r600_hpd_init, + .hpd_fini = &r600_hpd_fini, + .hpd_sense = &r600_hpd_sense, + .hpd_set_polarity = &r600_hpd_set_polarity, }; #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 2ed88a82093..fa82ca74324 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, bool linkb, uint32_t igp_lane_info, - uint16_t connector_object_id); + uint16_t connector_object_id, + struct radeon_hpd *hpd); /* from radeon_legacy_encoder.c */ extern void @@ -60,16 +61,16 @@ union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; -static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device - *dev, uint8_t id) +static inline struct radeon_i2c_bus_rec 
radeon_lookup_i2c_gpio(struct radeon_device *rdev, + uint8_t id) { - struct radeon_device *rdev = dev->dev_private; struct atom_context *ctx = rdev->mode_info.atom_context; - ATOM_GPIO_I2C_ASSIGMENT gpio; + ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset; + int i; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; @@ -78,34 +79,122 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); - gpio = i2c_info->asGPIO_Info[id]; - - i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4; - i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4; - i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; - i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; - i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); - i2c.put_clk_mask = (1 << gpio.ucClkEnShift); - i2c.put_data_mask = (1 << gpio.ucDataEnShift); - i2c.get_clk_mask = (1 << gpio.ucClkY_Shift); - i2c.get_data_mask = (1 << gpio.ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); - i2c.a_data_mask = (1 << gpio.ucDataA_Shift); - i2c.valid = true; + + for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { + gpio = &i2c_info->asGPIO_Info[i]; + + if (gpio->sucI2cId.ucAccess == id) { + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); + i2c.en_data_mask = (1 << gpio->ucDataEnShift); + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); + + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) + i2c.hw_capable = true; + else + i2c.hw_capable = false; + + if (gpio->sucI2cId.ucAccess == 0xa0) + i2c.mm_i2c = true; + else + i2c.mm_i2c = false; + + i2c.i2c_id = gpio->sucI2cId.ucAccess; + + i2c.valid = true; + break; + } + } return i2c; } +static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, + u8 id) +{ + struct atom_context *ctx = rdev->mode_info.atom_context; + struct radeon_gpio_rec gpio; + int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); + struct _ATOM_GPIO_PIN_LUT *gpio_info; + ATOM_GPIO_PIN_ASSIGNMENT *pin; + u16 data_offset, size; + int i, num_indices; + + memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); + gpio.valid = false; + + atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); + + gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); + + num_indices = (size - 
sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); + + for (i = 0; i < num_indices; i++) { + pin = &gpio_info->asGPIO_Pin[i]; + if (id == pin->ucGPIO_ID) { + gpio.id = pin->ucGPIO_ID; + gpio.reg = pin->usGpioPin_AIndex * 4; + gpio.mask = (1 << pin->ucGpioPinBitShift); + gpio.valid = true; + break; + } + } + + return gpio; +} + +static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev, + struct radeon_gpio_rec *gpio) +{ + struct radeon_hpd hpd; + hpd.gpio = *gpio; + if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { + switch(gpio->mask) { + case (1 << 0): + hpd.hpd = RADEON_HPD_1; + break; + case (1 << 8): + hpd.hpd = RADEON_HPD_2; + break; + case (1 << 16): + hpd.hpd = RADEON_HPD_3; + break; + case (1 << 24): + hpd.hpd = RADEON_HPD_4; + break; + case (1 << 26): + hpd.hpd = RADEON_HPD_5; + break; + case (1 << 28): + hpd.hpd = RADEON_HPD_6; + break; + default: + hpd.hpd = RADEON_HPD_NONE; + break; + } + } else + hpd.hpd = RADEON_HPD_NONE; + return hpd; +} + static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, struct radeon_i2c_bus_rec *i2c_bus, - uint16_t *line_mux) + uint16_t *line_mux, + struct radeon_hpd *hpd) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ @@ -135,6 +224,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } + /* HIS X1300 is DVI+VGA, not DVI+DVI */ + if ((dev->pdev->device == 0x7146) && + (dev->pdev->subsystem_vendor == 0x17af) && + (dev->pdev->subsystem_device == 0x2058)) { + if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) + return false; + } + + /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ + if ((dev->pdev->device == 0x7142) && + (dev->pdev->subsystem_vendor == 0x1458) && + (dev->pdev->subsystem_device == 0x2134)) { + if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) + return false; + } + + /* Funky macbooks */ if ((dev->pdev->device == 0x71C5) && (dev->pdev->subsystem_vendor == 0x106b) && @@ -172,6 +278,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } + /* Acer laptop reports DVI-D as DVI-I */ + if ((dev->pdev->device == 0x95c4) && + (dev->pdev->subsystem_vendor == 0x1025) && + (dev->pdev->subsystem_device == 0x013c)) { + if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) + *connector_type = DRM_MODE_CONNECTOR_DVID; + } + return true; } @@ -231,7 +346,9 @@ const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_Unknown, - DRM_MODE_CONNECTOR_DisplayPort + DRM_MODE_CONNECTOR_DisplayPort, + DRM_MODE_CONNECTOR_eDP, + DRM_MODE_CONNECTOR_Unknown }; bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) @@ -240,16 +357,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) struct radeon_mode_info *mode_info = &rdev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); - uint16_t size, data_offset; - uint8_t frev, crev, line_mux = 0; + u16 size, data_offset; + u8 frev, crev; ATOM_CONNECTOR_OBJECT_TABLE *con_obj; ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; ATOM_OBJECT_HEADER *obj_header; int i, j, path_size, device_support; int connector_type; - uint16_t igp_lane_info, conn_id, connector_object_id; + u16 igp_lane_info, conn_id, connector_object_id; bool linkb; struct radeon_i2c_bus_rec ddc_bus; + struct radeon_gpio_rec gpio; + struct radeon_hpd hpd; atom_parse_data_header(ctx, index, &size, &frev, &crev, 
&data_offset); @@ -276,7 +395,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); linkb = false; - if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; @@ -377,10 +495,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) } } - /* look up gpio for ddc */ + /* look up gpio for ddc, hpd */ if ((le16_to_cpu(path->usDeviceTag) & - (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - == 0) { + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(con_obj->asObjects[j]. @@ -394,21 +511,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) asObjects[j]. usRecordOffset)); ATOM_I2C_RECORD *i2c_record; + ATOM_HPD_INT_RECORD *hpd_record; + ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; + hpd.hpd = RADEON_HPD_NONE; while (record->ucRecordType > 0 && record-> ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { - switch (record-> - ucRecordType) { + switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = - (ATOM_I2C_RECORD - *) record; - line_mux = - i2c_record-> - sucI2cId. - bfI2C_LineMux; + (ATOM_I2C_RECORD *) + record; + i2c_config = + (ATOM_I2C_ID_CONFIG_ACCESS *) + &i2c_record->sucI2cId; + ddc_bus = radeon_lookup_i2c_gpio(rdev, + i2c_config-> + ucAccess); + break; + case ATOM_HPD_INT_RECORD_TYPE: + hpd_record = + (ATOM_HPD_INT_RECORD *) + record; + gpio = radeon_lookup_gpio(rdev, + hpd_record->ucHPDIntGPIOID); + hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); + hpd.plugged_state = hpd_record->ucPlugged_PinState; break; } record = @@ -421,24 +551,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) break; } } - } else - line_mux = 0; - - if ((le16_to_cpu(path->usDeviceTag) == - ATOM_DEVICE_TV1_SUPPORT) - || (le16_to_cpu(path->usDeviceTag) == - ATOM_DEVICE_TV2_SUPPORT) - || (le16_to_cpu(path->usDeviceTag) == - ATOM_DEVICE_CV_SUPPORT)) + } else { + hpd.hpd = RADEON_HPD_NONE; ddc_bus.valid = false; - else - ddc_bus = radeon_lookup_gpio(dev, line_mux); + } conn_id = le16_to_cpu(path->usConnObjectId); if (!radeon_atom_apply_quirks (dev, le16_to_cpu(path->usDeviceTag), &connector_type, - &ddc_bus, &conn_id)) + &ddc_bus, &conn_id, &hpd)) continue; radeon_add_atom_connector(dev, @@ -447,7 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) usDeviceTag), connector_type, &ddc_bus, linkb, igp_lane_info, - connector_object_id); + connector_object_id, + &hpd); } } @@ -502,6 +625,7 @@ struct bios_connector { uint16_t devices; int connector_type; struct radeon_i2c_bus_rec ddc_bus; + struct radeon_hpd hpd; }; bool radeon_get_atom_connector_info_from_supported_devices_table(struct @@ -517,7 +641,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct uint16_t device_support; uint8_t dac; union atom_supported_devices *supported_devices; - int i, j; + int i, j, max_device; struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); @@ -527,7 +651,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { + if (frev > 1) + max_device = ATOM_MAX_SUPPORTED_DEVICE; + else + max_device 
= ATOM_MAX_SUPPORTED_DEVICE_INFO; + + for (i = 0; i < max_device; i++) { ATOM_CONNECTOR_INFO_I2C ci = supported_devices->info.asConnInfo[i]; @@ -553,22 +682,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC; - if ((rdev->family == CHIP_RS690) || - (rdev->family == CHIP_RS740)) { - if ((i == ATOM_DEVICE_DFP2_INDEX) - && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2)) - bios_connectors[i].line_mux = - ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1; - else if ((i == ATOM_DEVICE_DFP3_INDEX) - && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1)) - bios_connectors[i].line_mux = - ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1; - else - bios_connectors[i].line_mux = - ci.sucI2cId.sbfAccess.bfI2C_LineMux; - } else - bios_connectors[i].line_mux = - ci.sucI2cId.sbfAccess.bfI2C_LineMux; + bios_connectors[i].line_mux = + ci.sucI2cId.ucAccess; /* give tv unique connector ids */ if (i == ATOM_DEVICE_TV1_INDEX) { @@ -582,8 +697,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct bios_connectors[i].line_mux = 52; } else bios_connectors[i].ddc_bus = - radeon_lookup_gpio(dev, - bios_connectors[i].line_mux); + radeon_lookup_i2c_gpio(rdev, + bios_connectors[i].line_mux); + + if ((crev > 1) && (frev > 1)) { + u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap; + switch (isb) { + case 0x4: + bios_connectors[i].hpd.hpd = RADEON_HPD_1; + break; + case 0xa: + bios_connectors[i].hpd.hpd = RADEON_HPD_2; + break; + default: + bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; + break; + } + } else { + if (i == ATOM_DEVICE_DFP1_INDEX) + bios_connectors[i].hpd.hpd = RADEON_HPD_1; + else if (i == ATOM_DEVICE_DFP2_INDEX) + bios_connectors[i].hpd.hpd = RADEON_HPD_2; + else + bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; + } /* Always set the connector type to VGA for CRT1/CRT2. if they are * shared with a DVI port, we'll pick up the DVI connector when we @@ -595,7 +732,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct if (!radeon_atom_apply_quirks (dev, (1 << i), &bios_connectors[i].connector_type, - &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) + &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux, + &bios_connectors[i].hpd)) continue; bios_connectors[i].valid = true; @@ -610,41 +748,42 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct else radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, - (1 << - i), + (1 << i), dac), (1 << i)); } /* combine shared connectors */ - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { + for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { - for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) { + for (j = 0; j < max_device; j++) { if (bios_connectors[j].valid && (i != j)) { if (bios_connectors[i].line_mux == bios_connectors[j].line_mux) { - if (((bios_connectors[i]. - devices & - (ATOM_DEVICE_DFP_SUPPORT)) - && (bios_connectors[j]. - devices & - (ATOM_DEVICE_CRT_SUPPORT))) - || - ((bios_connectors[j]. - devices & - (ATOM_DEVICE_DFP_SUPPORT)) - && (bios_connectors[i]. - devices & - (ATOM_DEVICE_CRT_SUPPORT)))) { - bios_connectors[i]. - devices |= - bios_connectors[j]. - devices; - bios_connectors[i]. - connector_type = - DRM_MODE_CONNECTOR_DVII; - bios_connectors[j]. 
- valid = false; + /* make sure not to combine LVDS */ + if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) { + bios_connectors[i].line_mux = 53; + bios_connectors[i].ddc_bus.valid = false; + continue; + } + if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) { + bios_connectors[j].line_mux = 53; + bios_connectors[j].ddc_bus.valid = false; + continue; + } + /* combine analog and digital for DVI-I */ + if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) && + (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) || + ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) && + (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) { + bios_connectors[i].devices |= + bios_connectors[j].devices; + bios_connectors[i].connector_type = + DRM_MODE_CONNECTOR_DVII; + if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) + bios_connectors[i].hpd = + bios_connectors[j].hpd; + bios_connectors[j].valid = false; } } } @@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct } /* add the connectors */ - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { + for (i = 0; i < max_device; i++) { if (bios_connectors[i].valid) { uint16_t connector_object_id = atombios_get_connector_object_id(dev, @@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct connector_type, &bios_connectors[i].ddc_bus, false, 0, - connector_object_id); + connector_object_id, + &bios_connectors[i].hpd); } } @@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per * family. */ - p1pll->pll_out_min = 64800; + if (!radeon_new_pll) + p1pll->pll_out_min = 64800; } p1pll->pll_in_min = @@ -797,6 +938,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) return false; } +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; +}; + +bool radeon_atombios_sideport_present(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + + atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, + &crev, &data_offset); + + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + + if (igp_info) { + switch (crev) { + case 1: + if (igp_info->info.ucMemoryType & 0xf0) + return true; + break; + case 2: + if (igp_info->info_2.ucMemoryType & 0x0f) + return true; + break; + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + break; + } + } + return false; +} + bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, struct radeon_encoder_int_tmds *tmds) { @@ -861,6 +1039,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; uint8_t frev, crev; struct radeon_atom_ss *ss = NULL; + int i; if (id > ATOM_MAX_SS_ENTRY) return NULL; @@ -878,12 +1057,18 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct if (!ss) return NULL; - ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage); - ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType; - ss->step = ss_info->asSS_Info[id].ucSS_Step; - ss->delay = ss_info->asSS_Info[id].ucSS_Delay; - ss->range = ss_info->asSS_Info[id].ucSS_Range; - ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div; + for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { + if 
(ss_info->asSS_Info[i].ucSS_Id == id) { + ss->percentage = + le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); + ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; + ss->step = ss_info->asSS_Info[i].ucSS_Step; + ss->delay = ss_info->asSS_Info[i].ucSS_Delay; + ss->range = ss_info->asSS_Info[i].ucSS_Range; + ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; + break; + } + } } return ss; } @@ -901,7 +1086,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, LVDS_Info); - uint16_t data_offset; + uint16_t data_offset, misc; union lvds_info *lvds_info; uint8_t frev, crev; struct radeon_encoder_atom_dig *lvds = NULL; @@ -940,6 +1125,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; + + misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); + if (misc & ATOM_VSYNC_POLARITY) + lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; + if (misc & ATOM_HSYNC_POLARITY) + lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; + if (misc & ATOM_COMPOSITESYNC) + lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; + if (misc & ATOM_INTERLACE) + lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; + if (misc & ATOM_DOUBLE_CLOCK_MODE) + lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; + /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); @@ -1074,6 +1272,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, return true; } +enum radeon_tv_std +radeon_atombios_get_tv_info(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); + uint16_t data_offset; + uint8_t frev, crev; + struct _ATOM_ANALOG_TV_INFO *tv_info; + enum radeon_tv_std tv_std = TV_STD_NTSC; + + atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); + + tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); + + switch (tv_info->ucTV_BootUpDefaultStandard) { + case ATOM_TV_NTSC: + tv_std = TV_STD_NTSC; + DRM_INFO("Default TV standard: NTSC\n"); + break; + case ATOM_TV_NTSCJ: + tv_std = TV_STD_NTSC_J; + DRM_INFO("Default TV standard: NTSC-J\n"); + break; + case ATOM_TV_PAL: + tv_std = TV_STD_PAL; + DRM_INFO("Default TV standard: PAL\n"); + break; + case ATOM_TV_PALM: + tv_std = TV_STD_PAL_M; + DRM_INFO("Default TV standard: PAL-M\n"); + break; + case ATOM_TV_PALN: + tv_std = TV_STD_PAL_N; + DRM_INFO("Default TV standard: PAL-N\n"); + break; + case ATOM_TV_PALCN: + tv_std = TV_STD_PAL_CN; + DRM_INFO("Default TV standard: PAL-CN\n"); + break; + case ATOM_TV_PAL60: + tv_std = TV_STD_PAL_60; + DRM_INFO("Default TV standard: PAL-60\n"); + break; + case ATOM_TV_SECAM: + tv_std = TV_STD_SECAM; + DRM_INFO("Default TV standard: SECAM\n"); + break; + default: + tv_std = TV_STD_NTSC; + DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); + break; + } + return tv_std; +} + struct radeon_encoder_tv_dac * radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) { @@ -1109,6 +1362,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); + tv_dac->tv_std = radeon_atombios_get_tv_info(rdev); } return tv_dac; } 
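The radeon_atombios.c hunks above replace direct indexing of the ATOM GPIO/I2C tables with lookups keyed on the record id, and derive the hot-plug-detect (HPD) pin from the GPIO assignment: when the GPIO register is AVIVO_DC_GPIO_HPD_A, the single set bit in the mask selects RADEON_HPD_1 through RADEON_HPD_6. The following is a minimal standalone sketch of that mask-to-pin mapping, assuming placeholder enum values and a hypothetical helper name rather than the driver's own headers; the in-tree code keeps the explicit switch shown in the hunk.

/* Illustrative sketch only: mirrors the switch in
 * radeon_atom_get_hpd_info_from_gpio() above with a lookup table.
 * Names and enum values are placeholders, not the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

enum hpd_id { HPD_1, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6, HPD_NONE };

static enum hpd_id hpd_from_gpio_mask(uint32_t mask)
{
	/* bit position in AVIVO_DC_GPIO_HPD_A -> HPD pin, as in the hunk above */
	static const struct { uint32_t bit; enum hpd_id hpd; } map[] = {
		{ 1u << 0,  HPD_1 }, { 1u << 8,  HPD_2 }, { 1u << 16, HPD_3 },
		{ 1u << 24, HPD_4 }, { 1u << 26, HPD_5 }, { 1u << 28, HPD_6 },
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].bit == mask)
			return map[i].hpd;
	return HPD_NONE;	/* unknown bit or a different GPIO register */
}

int main(void)
{
	/* e.g. the BIOS reports GPIO bit 8 -> second HPD pin */
	printf("mask 0x%08x -> HPD_%d\n", 1u << 8, hpd_from_gpio_mask(1u << 8) + 1);
	return 0;
}

A table keeps the bit-to-pin relationship in one place; the switch used in the patch reads the same way and needs no loop, which is why the driver keeps it.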
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 10bd50a7db8..4ddfd4b5bc5 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -29,8 +29,8 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, unsigned sdomain, unsigned ddomain) { - struct radeon_object *dobj = NULL; - struct radeon_object *sobj = NULL; + struct radeon_bo *dobj = NULL; + struct radeon_bo *sobj = NULL; struct radeon_fence *fence = NULL; uint64_t saddr, daddr; unsigned long start_jiffies; @@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, size = bsize; n = 1024; - r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj); + r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); if (r) { goto out_cleanup; } - r = radeon_object_pin(sobj, sdomain, &saddr); + r = radeon_bo_reserve(sobj, false); + if (unlikely(r != 0)) + goto out_cleanup; + r = radeon_bo_pin(sobj, sdomain, &saddr); + radeon_bo_unreserve(sobj); if (r) { goto out_cleanup; } - r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj); + r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); if (r) { goto out_cleanup; } - r = radeon_object_pin(dobj, ddomain, &daddr); + r = radeon_bo_reserve(dobj, false); + if (unlikely(r != 0)) + goto out_cleanup; + r = radeon_bo_pin(dobj, ddomain, &daddr); + radeon_bo_unreserve(dobj); if (r) { goto out_cleanup; } @@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, } out_cleanup: if (sobj) { - radeon_object_unpin(sobj); - radeon_object_unref(&sobj); + r = radeon_bo_reserve(sobj, false); + if (likely(r == 0)) { + radeon_bo_unpin(sobj); + radeon_bo_unreserve(sobj); + } + radeon_bo_unref(&sobj); } if (dobj) { - radeon_object_unpin(dobj); - radeon_object_unref(&dobj); + r = radeon_bo_reserve(dobj, false); + if (likely(r == 0)) { + radeon_bo_unpin(dobj); + radeon_bo_unreserve(dobj); + } + radeon_bo_unref(&dobj); } if (fence) { radeon_fence_unref(&fence); diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index a8135416762..73c4405bf42 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) ref_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; + + if (ref_div == 0) + return 0; + sclk = fb_div / ref_div; post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; @@ -52,13 +56,13 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) else if (post_div == 3) sclk >>= 2; else if (post_div == 4) - sclk >>= 4; + sclk >>= 3; return sclk; } /* 10 khz */ -static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) +uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) { struct radeon_pll *mpll = &rdev->clock.mpll; uint32_t fb_div, ref_div, post_div, mclk; @@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) ref_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; + + if (ref_div == 0) + return 0; + mclk = fb_div / ref_div; post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; @@ -78,7 +86,7 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) else if (post_div == 3) mclk >>= 2; else if (post_div == 4) - mclk >>= 4; + mclk >>= 3; return mclk; } @@ -98,8 +106,19 @@ void 
radeon_get_clock_info(struct drm_device *dev) ret = radeon_combios_get_clock_info(dev); if (ret) { - if (p1pll->reference_div < 2) - p1pll->reference_div = 12; + if (p1pll->reference_div < 2) { + if (!ASIC_IS_AVIVO(rdev)) { + u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV); + if (ASIC_IS_R300(rdev)) + p1pll->reference_div = + (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT; + else + p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK; + if (p1pll->reference_div < 2) + p1pll->reference_div = 12; + } else + p1pll->reference_div = 12; + } if (p2pll->reference_div < 2) p2pll->reference_div = 12; if (rdev->family < CHIP_RS600) { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 5253cbf6db1..579c8920e08 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, - uint16_t connector_object_id); + uint16_t connector_object_id, + struct radeon_hpd *hpd); /* from radeon_legacy_encoder.c */ extern void @@ -442,39 +443,71 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, } -struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) +static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, + int ddc_line) { struct radeon_i2c_bus_rec i2c; - i2c.mask_clk_mask = RADEON_GPIO_EN_1; - i2c.mask_data_mask = RADEON_GPIO_EN_0; - i2c.a_clk_mask = RADEON_GPIO_A_1; - i2c.a_data_mask = RADEON_GPIO_A_0; - i2c.put_clk_mask = RADEON_GPIO_EN_1; - i2c.put_data_mask = RADEON_GPIO_EN_0; - i2c.get_clk_mask = RADEON_GPIO_Y_1; - i2c.get_data_mask = RADEON_GPIO_Y_0; - if ((ddc_line == RADEON_LCD_GPIO_MASK) || - (ddc_line == RADEON_MDGPIO_EN_REG)) { - i2c.mask_clk_reg = ddc_line; - i2c.mask_data_reg = ddc_line; - i2c.a_clk_reg = ddc_line; - i2c.a_data_reg = ddc_line; - i2c.put_clk_reg = ddc_line; - i2c.put_data_reg = ddc_line; - i2c.get_clk_reg = ddc_line + 4; - i2c.get_data_reg = ddc_line + 4; + if (ddc_line == RADEON_GPIOPAD_MASK) { + i2c.mask_clk_reg = RADEON_GPIOPAD_MASK; + i2c.mask_data_reg = RADEON_GPIOPAD_MASK; + i2c.a_clk_reg = RADEON_GPIOPAD_A; + i2c.a_data_reg = RADEON_GPIOPAD_A; + i2c.en_clk_reg = RADEON_GPIOPAD_EN; + i2c.en_data_reg = RADEON_GPIOPAD_EN; + i2c.y_clk_reg = RADEON_GPIOPAD_Y; + i2c.y_data_reg = RADEON_GPIOPAD_Y; + } else if (ddc_line == RADEON_MDGPIO_MASK) { + i2c.mask_clk_reg = RADEON_MDGPIO_MASK; + i2c.mask_data_reg = RADEON_MDGPIO_MASK; + i2c.a_clk_reg = RADEON_MDGPIO_A; + i2c.a_data_reg = RADEON_MDGPIO_A; + i2c.en_clk_reg = RADEON_MDGPIO_EN; + i2c.en_data_reg = RADEON_MDGPIO_EN; + i2c.y_clk_reg = RADEON_MDGPIO_Y; + i2c.y_data_reg = RADEON_MDGPIO_Y; } else { + i2c.mask_clk_mask = RADEON_GPIO_EN_1; + i2c.mask_data_mask = RADEON_GPIO_EN_0; + i2c.a_clk_mask = RADEON_GPIO_A_1; + i2c.a_data_mask = RADEON_GPIO_A_0; + i2c.en_clk_mask = RADEON_GPIO_EN_1; + i2c.en_data_mask = RADEON_GPIO_EN_0; + i2c.y_clk_mask = RADEON_GPIO_Y_1; + i2c.y_data_mask = RADEON_GPIO_Y_0; + i2c.mask_clk_reg = ddc_line; i2c.mask_data_reg = ddc_line; i2c.a_clk_reg = ddc_line; i2c.a_data_reg = ddc_line; - i2c.put_clk_reg = ddc_line; - i2c.put_data_reg = ddc_line; - i2c.get_clk_reg = ddc_line; - i2c.get_data_reg = ddc_line; + i2c.en_clk_reg = ddc_line; + i2c.en_data_reg = ddc_line; + i2c.y_clk_reg = ddc_line; + i2c.y_data_reg = ddc_line; } + if (rdev->family < CHIP_R200) + i2c.hw_capable = false; + else { + switch (ddc_line) { + case 
RADEON_GPIO_VGA_DDC: + case RADEON_GPIO_DVI_DDC: + i2c.hw_capable = true; + break; + case RADEON_GPIO_MONID: + /* hw i2c on RADEON_GPIO_MONID doesn't seem to work + * reliably on some pre-r4xx hardware; not sure why. + */ + i2c.hw_capable = false; + break; + default: + i2c.hw_capable = false; + break; + } + } + i2c.mm_i2c = false; + i2c.i2c_id = 0; + if (ddc_line) i2c.valid = true; else @@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) uint16_t sclk, mclk; if (rdev->bios == NULL) - return NULL; + return false; pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); if (pll_info) { @@ -562,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) return false; } +bool radeon_combios_sideport_present(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + u16 igp_info; + + igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE); + + if (igp_info) { + if (RBIOS16(igp_info + 0x4)) + return true; + } + return false; +} + +static const uint32_t default_primarydac_adj[CHIP_LAST] = { + 0x00000808, /* r100 */ + 0x00000808, /* rv100 */ + 0x00000808, /* rs100 */ + 0x00000808, /* rv200 */ + 0x00000808, /* rs200 */ + 0x00000808, /* r200 */ + 0x00000808, /* rv250 */ + 0x00000000, /* rs300 */ + 0x00000808, /* rv280 */ + 0x00000808, /* r300 */ + 0x00000808, /* r350 */ + 0x00000808, /* rv350 */ + 0x00000808, /* rv380 */ + 0x00000808, /* r420 */ + 0x00000808, /* r423 */ + 0x00000808, /* rv410 */ + 0x00000000, /* rs400 */ + 0x00000000, /* rs480 */ +}; + +static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev, + struct radeon_encoder_primary_dac *p_dac) +{ + p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family]; + return; +} + struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder) @@ -571,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct uint16_t dac_info; uint8_t rev, bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; + int found = 0; - if (rdev->bios == NULL) + p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), + GFP_KERNEL); + + if (!p_dac) return NULL; + if (rdev->bios == NULL) + goto out; + /* check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { - p_dac = - kzalloc(sizeof(struct radeon_encoder_primary_dac), - GFP_KERNEL); - - if (!p_dac) - return NULL; - rev = RBIOS8(dac_info) & 0x3; if (rev < 2) { bg = RBIOS8(dac_info + 0x2) & 0xf; @@ -595,20 +670,26 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } - + found = 1; } +out: + if (!found) /* fallback to defaults */ + radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); + return p_dac; } -static enum radeon_tv_std -radeon_combios_get_tv_info(struct radeon_encoder *encoder) +enum radeon_tv_std +radeon_combios_get_tv_info(struct radeon_device *rdev) { - struct drm_device *dev = encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; + struct drm_device *dev = rdev->ddev; uint16_t tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; + if (rdev->bios == NULL) + return tv_std; + tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (tv_info) { if (RBIOS8(tv_info + 6) == 'T') { @@ -746,7 +827,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); found = 1; } - tv_dac->tv_std = 
radeon_combios_get_tv_info(encoder); + tv_dac->tv_std = radeon_combios_get_tv_info(rdev); } if (!found) { /* then check CRT table */ @@ -993,8 +1074,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = { {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ - {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */ - {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ + { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */ + { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */ }; bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, @@ -1028,7 +1109,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { - ver = RBIOS8(tmds_info); DRM_INFO("DFP table revision: %d\n", ver); if (ver == 3) { @@ -1063,51 +1143,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, tmds->tmds_pll[i].value); } } - } else + } else { DRM_INFO("No TMDS info found in BIOS\n"); + return false; + } return true; } -struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) +bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, + struct radeon_encoder_ext_tmds *tmds) { - struct radeon_encoder_int_tmds *tmds = NULL; - bool ret; + struct drm_device *dev = encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_i2c_bus_rec i2c_bus; - tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); + /* default for macs */ + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); - if (!tmds) - return NULL; - - ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); - if (ret == false) - radeon_legacy_get_tmds_info_from_table(encoder, tmds); + /* XXX some macs have duallink chips */ + switch (rdev->mode_info.connector_table) { + case CT_POWERBOOK_EXTERNAL: + case CT_MINI_EXTERNAL: + default: + tmds->dvo_chip = DVO_SIL164; + tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ + break; + } - return tmds; + return true; } -void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder) +bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, + struct radeon_encoder_ext_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; - uint16_t ext_tmds_info; - uint8_t ver; + uint16_t offset; + uint8_t ver, id, blocks, clk, data; + int i; + enum radeon_combios_ddc gpio; + struct radeon_i2c_bus_rec i2c_bus; if (rdev->bios == NULL) - return; + return false; - ext_tmds_info = - combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); - if (ext_tmds_info) { - ver = RBIOS8(ext_tmds_info); - DRM_INFO("External TMDS Table revision: %d\n", ver); - // TODO + tmds->i2c_bus = NULL; + if (rdev->flags & RADEON_IS_IGP) { + offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); + if (offset) { + ver = RBIOS8(offset); + DRM_INFO("GPIO Table revision: %d\n", ver); + blocks = RBIOS8(offset + 2); + for (i = 0; i < blocks; i++) { + id = RBIOS8(offset + 3 + (i * 5) + 0); + if (id == 136) { + clk = RBIOS8(offset + 3 + (i * 5) + 3); + data = RBIOS8(offset + 3 + (i * 5) + 4); + i2c_bus.valid = true; + i2c_bus.mask_clk_mask = (1 << clk); + 
i2c_bus.mask_data_mask = (1 << data); + i2c_bus.a_clk_mask = (1 << clk); + i2c_bus.a_data_mask = (1 << data); + i2c_bus.en_clk_mask = (1 << clk); + i2c_bus.en_data_mask = (1 << data); + i2c_bus.y_clk_mask = (1 << clk); + i2c_bus.y_data_mask = (1 << data); + i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK; + i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK; + i2c_bus.a_clk_reg = RADEON_GPIOPAD_A; + i2c_bus.a_data_reg = RADEON_GPIOPAD_A; + i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN; + i2c_bus.en_data_reg = RADEON_GPIOPAD_EN; + i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y; + i2c_bus.y_data_reg = RADEON_GPIOPAD_Y; + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); + tmds->dvo_chip = DVO_SIL164; + tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ + break; + } + } + } + } else { + offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); + if (offset) { + ver = RBIOS8(offset); + DRM_INFO("External TMDS Table revision: %d\n", ver); + tmds->slave_addr = RBIOS8(offset + 4 + 2); + tmds->slave_addr >>= 1; /* 7 bit addressing */ + gpio = RBIOS8(offset + 4 + 3); + switch (gpio) { + case DDC_MONID: + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); + break; + case DDC_DVI: + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); + break; + case DDC_VGA: + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); + break; + case DDC_CRT2: + /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ + if (rdev->family >= CHIP_R300) + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); + else + i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); + break; + case DDC_LCD: /* MM i2c */ + DRM_ERROR("MM i2c requires hw i2c engine\n"); + break; + default: + DRM_ERROR("Unsupported gpio %d\n", gpio); + break; + } + } } + + if (!tmds->i2c_bus) { + DRM_INFO("No valid Ext TMDS info found in BIOS\n"); + return false; + } + + return true; } bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; struct radeon_i2c_bus_rec ddc_i2c; + struct radeon_hpd hpd; rdev->mode_info.connector_table = radeon_connector_table; if (rdev->mode_info.connector_table == CT_NONE) { @@ -1168,7 +1336,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) /* these are the most common settings */ if (rdev->flags & RADEON_SINGLE_CRTC) { /* VGA - primary dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT1_SUPPORT, @@ -1178,10 +1347,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); } else if (rdev->flags & RADEON_IS_MOBILITY) { /* LVDS */ - ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); + ddc_i2c = combios_setup_i2c_bus(rdev, 0); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_LCD1_SUPPORT, @@ -1191,10 +1362,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); /* VGA - 
primary dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT1_SUPPORT, @@ -1204,10 +1377,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); } else { /* DVI-I - tv dac, int tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_1; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP1_SUPPORT, @@ -1223,10 +1398,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); /* VGA - primary dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT1_SUPPORT, @@ -1236,11 +1413,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); } if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { /* TV - tv dac */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1250,14 +1430,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); } break; case CT_IBOOK: DRM_INFO("Connector Table: %d (ibook)\n", rdev->mode_info.connector_table); /* LVDS */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_LCD1_SUPPORT, @@ -1265,9 +1447,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); /* VGA - TV DAC */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT2_SUPPORT, @@ -1275,8 +1459,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1285,13 +1472,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_POWERBOOK_EXTERNAL: DRM_INFO("Connector Table: %d 
(powerbook external tmds)\n", rdev->mode_info.connector_table); /* LVDS */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_LCD1_SUPPORT, @@ -1299,9 +1488,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); /* DVI-I - primary dac, ext tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_2; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP2_SUPPORT, @@ -1317,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I); + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1327,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_POWERBOOK_INTERNAL: DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", rdev->mode_info.connector_table); /* LVDS */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_LCD1_SUPPORT, @@ -1341,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); /* DVI-I - primary dac, int tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP1_SUPPORT, @@ -1358,8 +1556,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1368,13 +1569,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_POWERBOOK_VGA: DRM_INFO("Connector Table: %d (powerbook vga)\n", rdev->mode_info.connector_table); /* LVDS */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_LCD1_SUPPORT, @@ -1382,9 +1585,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_LCD1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); /* VGA - primary dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT1_SUPPORT, @@ -1392,8 +1597,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1402,13 +1610,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_MINI_EXTERNAL: DRM_INFO("Connector Table: %d (mini external tmds)\n", rdev->mode_info.connector_table); /* DVI-I - tv dac, ext tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + hpd.hpd = RADEON_HPD_2; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP2_SUPPORT, @@ -1424,8 +1634,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1434,13 +1647,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_MINI_INTERNAL: DRM_INFO("Connector Table: %d (mini internal tmds)\n", rdev->mode_info.connector_table); /* DVI-I - tv dac, int tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + hpd.hpd = RADEON_HPD_1; /* ??? */ radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP1_SUPPORT, @@ -1455,8 +1670,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1465,13 +1683,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_IMAC_G5_ISIGHT: DRM_INFO("Connector Table: %d (imac g5 isight)\n", rdev->mode_info.connector_table); /* DVI-D - int tmds */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); + hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_DFP1_SUPPORT, @@ -1479,9 +1699,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, DRM_MODE_CONNECTOR_DVID, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, + &hpd); /* VGA - tv dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT2_SUPPORT, @@ -1489,8 +1711,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1499,13 +1724,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; case CT_EMAC: DRM_INFO("Connector Table: %d (emac)\n", rdev->mode_info.connector_table); /* VGA - primary dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT1_SUPPORT, @@ -1513,9 +1740,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); /* VGA - tv dac */ - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_CRT2_SUPPORT, @@ -1523,8 +1752,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ATOM_DEVICE_CRT2_SUPPORT); radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, ATOM_DEVICE_TV1_SUPPORT, @@ -1533,7 +1765,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; default: DRM_INFO("Connector table: %d (invalid)\n", @@ -1550,7 +1783,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, int bios_index, enum radeon_combios_connector *legacy_connector, - struct radeon_i2c_bus_rec *ddc_i2c) + struct radeon_i2c_bus_rec *ddc_i2c, + struct radeon_hpd *hpd) { struct radeon_device *rdev = dev->dev_private; @@ -1558,29 +1792,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, if ((rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) && ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) - *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); 
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); else if ((rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) && ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) { - ddc_i2c->valid = true; + *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK); ddc_i2c->mask_clk_mask = (0x20 << 8); ddc_i2c->mask_data_mask = 0x80; ddc_i2c->a_clk_mask = (0x20 << 8); ddc_i2c->a_data_mask = 0x80; - ddc_i2c->put_clk_mask = (0x20 << 8); - ddc_i2c->put_data_mask = 0x80; - ddc_i2c->get_clk_mask = (0x20 << 8); - ddc_i2c->get_data_mask = 0x80; - ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK; - ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK; - ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A; - ddc_i2c->a_data_reg = RADEON_GPIOPAD_A; - ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN; - ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN; - ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG; - ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG; + ddc_i2c->en_clk_mask = (0x20 << 8); + ddc_i2c->en_data_mask = 0x80; + ddc_i2c->y_clk_mask = (0x20 << 8); + ddc_i2c->y_data_mask = 0x80; } + /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ + if ((rdev->family >= CHIP_R300) && + ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) + *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ if (dev->pdev->device == 0x515e && @@ -1624,6 +1855,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) dev->pdev->subsystem_device == 0x280a) return false; + /* MSI S270 has non-existent TV port */ + if (dev->pdev->device == 0x5955 && + dev->pdev->subsystem_vendor == 0x1462 && + dev->pdev->subsystem_device == 0x0131) + return false; + return true; } @@ -1671,6 +1908,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) enum radeon_combios_connector connector; int i = 0; struct radeon_i2c_bus_rec ddc_i2c; + struct radeon_hpd hpd; if (rdev->bios == NULL) return false; @@ -1691,26 +1929,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) switch (ddc_type) { case DDC_MONID: ddc_i2c = - combios_setup_i2c_bus(RADEON_GPIO_MONID); + combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); break; case DDC_DVI: ddc_i2c = - combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); break; case DDC_VGA: ddc_i2c = - combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); break; case DDC_CRT2: ddc_i2c = - combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); + combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); + break; + default: + break; + } + + switch (connector) { + case CONNECTOR_PROPRIETARY_LEGACY: + case CONNECTOR_DVI_I_LEGACY: + case CONNECTOR_DVI_D_LEGACY: + if ((tmp >> 4) & 0x1) + hpd.hpd = RADEON_HPD_2; + else + hpd.hpd = RADEON_HPD_1; break; default: + hpd.hpd = RADEON_HPD_NONE; break; } if (!radeon_apply_legacy_quirks(dev, i, &connector, - &ddc_i2c)) + &ddc_i2c, &hpd)) continue; switch (connector) { @@ -1727,7 +1979,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) legacy_connector_convert [connector], &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, + &hpd); break; case CONNECTOR_CRT_LEGACY: if (tmp & 0x1) { @@ -1753,7 +2006,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) legacy_connector_convert [connector], &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); break; case 
CONNECTOR_DVI_I_LEGACY: devices = 0; @@ -1799,7 +2053,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) legacy_connector_convert [connector], &ddc_i2c, - connector_object_id); + connector_object_id, + &hpd); break; case CONNECTOR_DVI_D_LEGACY: if ((tmp >> 4) & 0x1) { @@ -1817,7 +2072,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) legacy_connector_convert [connector], &ddc_i2c, - connector_object_id); + connector_object_id, + &hpd); break; case CONNECTOR_CTV_LEGACY: case CONNECTOR_STV_LEGACY: @@ -1832,7 +2088,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) legacy_connector_convert [connector], &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); break; default: DRM_ERROR("Unknown connector type: %d\n", @@ -1858,14 +2115,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 0), ATOM_DEVICE_DFP1_SUPPORT); - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_DFP1_SUPPORT, DRM_MODE_CONNECTOR_DVII, &ddc_i2c, - CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); } else { uint16_t crt_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); @@ -1876,13 +2135,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); - ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); + ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, DRM_MODE_CONNECTOR_VGA, &ddc_i2c, - CONNECTOR_OBJECT_ID_VGA); + CONNECTOR_OBJECT_ID_VGA, + &hpd); } else { DRM_DEBUG("No connector info found\n"); return false; @@ -1910,27 +2171,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) case DDC_MONID: ddc_i2c = combios_setup_i2c_bus - (RADEON_GPIO_MONID); + (rdev, RADEON_GPIO_MONID); break; case DDC_DVI: ddc_i2c = combios_setup_i2c_bus - (RADEON_GPIO_DVI_DDC); + (rdev, RADEON_GPIO_DVI_DDC); break; case DDC_VGA: ddc_i2c = combios_setup_i2c_bus - (RADEON_GPIO_VGA_DDC); + (rdev, RADEON_GPIO_VGA_DDC); break; case DDC_CRT2: ddc_i2c = combios_setup_i2c_bus - (RADEON_GPIO_CRT2_DDC); + (rdev, RADEON_GPIO_CRT2_DDC); break; case DDC_LCD: ddc_i2c = combios_setup_i2c_bus - (RADEON_LCD_GPIO_MASK); + (rdev, RADEON_GPIOPAD_MASK); ddc_i2c.mask_clk_mask = RBIOS32(lcd_ddc_info + 3); ddc_i2c.mask_data_mask = @@ -1939,19 +2200,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) RBIOS32(lcd_ddc_info + 3); ddc_i2c.a_data_mask = RBIOS32(lcd_ddc_info + 7); - ddc_i2c.put_clk_mask = + ddc_i2c.en_clk_mask = RBIOS32(lcd_ddc_info + 3); - ddc_i2c.put_data_mask = + ddc_i2c.en_data_mask = RBIOS32(lcd_ddc_info + 7); - ddc_i2c.get_clk_mask = + ddc_i2c.y_clk_mask = RBIOS32(lcd_ddc_info + 3); - ddc_i2c.get_data_mask = + ddc_i2c.y_data_mask = RBIOS32(lcd_ddc_info + 7); break; case DDC_GPIO: ddc_i2c = combios_setup_i2c_bus - (RADEON_MDGPIO_EN_REG); + (rdev, RADEON_MDGPIO_MASK); ddc_i2c.mask_clk_mask = RBIOS32(lcd_ddc_info + 3); ddc_i2c.mask_data_mask = @@ -1960,13 +2221,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) RBIOS32(lcd_ddc_info + 3); ddc_i2c.a_data_mask = RBIOS32(lcd_ddc_info + 7); - ddc_i2c.put_clk_mask = + ddc_i2c.en_clk_mask = RBIOS32(lcd_ddc_info + 3); - 
ddc_i2c.put_data_mask = + ddc_i2c.en_data_mask = RBIOS32(lcd_ddc_info + 7); - ddc_i2c.get_clk_mask = + ddc_i2c.y_clk_mask = RBIOS32(lcd_ddc_info + 3); - ddc_i2c.get_data_mask = + ddc_i2c.y_data_mask = RBIOS32(lcd_ddc_info + 7); break; default: @@ -1977,12 +2238,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) } else ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_connector(dev, 5, ATOM_DEVICE_LCD1_SUPPORT, DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, - CONNECTOR_OBJECT_ID_LVDS); + CONNECTOR_OBJECT_ID_LVDS, + &hpd); } } @@ -1993,6 +2256,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if (tv_info) { if (RBIOS8(tv_info + 6) == 'T') { if (radeon_apply_legacy_tv_quirks(dev)) { + hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, radeon_get_encoder_id (dev, @@ -2003,7 +2267,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) ATOM_DEVICE_TV1_SUPPORT, DRM_MODE_CONNECTOR_SVIDEO, &ddc_i2c, - CONNECTOR_OBJECT_ID_SVIDEO); + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); } } } @@ -2014,6 +2279,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) return true; } +void radeon_external_tmds_setup(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; + + if (!tmds) + return; + + switch (tmds->dvo_chip) { + case DVO_SIL164: + /* sil 164 */ + radeon_i2c_do_lock(tmds->i2c_bus, 1); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x08, 0x30); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x09, 0x00); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x0a, 0x90); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x0c, 0x89); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x08, 0x3b); + radeon_i2c_do_lock(tmds->i2c_bus, 0); + break; + case DVO_SIL1178: + /* sil 1178 - untested */ + /* + * 0x0f, 0x44 + * 0x0f, 0x4c + * 0x0e, 0x01 + * 0x0a, 0x80 + * 0x09, 0x30 + * 0x0c, 0xc9 + * 0x0d, 0x70 + * 0x08, 0x32 + * 0x08, 0x33 + */ + break; + default: + break; + } + +} + +bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + uint16_t offset; + uint8_t blocks, slave_addr, rev; + uint32_t index, id; + uint32_t reg, val, and_mask, or_mask; + struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; + + if (rdev->bios == NULL) + return false; + + if (!tmds) + return false; + + if (rdev->flags & RADEON_IS_IGP) { + offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE); + rev = RBIOS8(offset); + if (offset) { + rev = RBIOS8(offset); + if (rev > 1) { + blocks = RBIOS8(offset + 3); + index = offset + 4; + while (blocks > 0) { + id = RBIOS16(index); + index += 2; + switch (id >> 13) { + case 0: + reg = (id & 0x1fff) * 4; + val = RBIOS32(index); + index += 4; + WREG32(reg, val); + break; + case 2: + reg = (id & 0x1fff) * 4; + and_mask = RBIOS32(index); + index += 4; + or_mask = RBIOS32(index); + index += 4; + val = RREG32(reg); + val = (val & and_mask) | or_mask; + WREG32(reg, val); + break; + case 3: + val = RBIOS16(index); + index += 2; + udelay(val); + break; + case 4: + val = RBIOS16(index); + index += 2; + udelay(val * 1000); + break; + case 6: + slave_addr = id & 0xff; + slave_addr >>= 1; /* 7 bit addressing */ + index++; + reg = 
RBIOS8(index); + index++; + val = RBIOS8(index); + index++; + radeon_i2c_do_lock(tmds->i2c_bus, 1); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + slave_addr, + reg, val); + radeon_i2c_do_lock(tmds->i2c_bus, 0); + break; + default: + DRM_ERROR("Unknown id %d\n", id >> 13); + break; + } + blocks--; + } + return true; + } + } + } else { + offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); + if (offset) { + index = offset + 10; + id = RBIOS16(index); + while (id != 0xffff) { + index += 2; + switch (id >> 13) { + case 0: + reg = (id & 0x1fff) * 4; + val = RBIOS32(index); + WREG32(reg, val); + break; + case 2: + reg = (id & 0x1fff) * 4; + and_mask = RBIOS32(index); + index += 4; + or_mask = RBIOS32(index); + index += 4; + val = RREG32(reg); + val = (val & and_mask) | or_mask; + WREG32(reg, val); + break; + case 4: + val = RBIOS16(index); + index += 2; + udelay(val); + break; + case 5: + reg = id & 0x1fff; + and_mask = RBIOS32(index); + index += 4; + or_mask = RBIOS32(index); + index += 4; + val = RREG32_PLL(reg); + val = (val & and_mask) | or_mask; + WREG32_PLL(reg, val); + break; + case 6: + reg = id & 0x1fff; + val = RBIOS8(index); + index += 1; + radeon_i2c_do_lock(tmds->i2c_bus, 1); + radeon_i2c_sw_put_byte(tmds->i2c_bus, + tmds->slave_addr, + reg, val); + radeon_i2c_do_lock(tmds->i2c_bus, 0); + break; + default: + DRM_ERROR("Unknown id %d\n", id >> 13); + break; + } + id = RBIOS16(index); + } + return true; + } + } + return false; +} + static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) { struct radeon_device *rdev = dev->dev_private; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 29763ceae3a..55266416fa4 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -40,6 +40,28 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected); +void radeon_connector_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); + + if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) { + if (radeon_dp_needs_link_train(radeon_connector)) { + if (connector->encoder) + dp_link_train(connector->encoder, connector); + } + } + } + +} + static void radeon_property_change_mode(struct drm_encoder *encoder) { struct drm_crtc *crtc = encoder->crtc; @@ -188,6 +210,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode drm_mode_set_name(mode); DRM_DEBUG("Adding native panel mode %s\n", mode->name); + } else if (native_mode->hdisplay != 0 && + native_mode->vdisplay != 0) { + /* mac laptops without an edid */ + /* Note that this is not necessarily the exact panel mode, + * but an approximation based on the cvt formula. For these + * systems we should ideally read the mode info out of the + * registers or add a mode table, but this works and is much + * simpler. 
+ */ + mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; + DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name); } return mode; } @@ -445,10 +479,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec ret = connector_status_connected; else { if (radeon_connector->ddc_bus) { - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (radeon_connector->edid) ret = connector_status_connected; } @@ -553,17 +587,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect if (!encoder) ret = connector_status_disconnected; - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", @@ -583,7 +617,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect ret = connector_status_connected; } } else { - if (radeon_connector->dac_load_detect) { + if (radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } @@ -708,17 +742,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect enum drm_connector_status ret = connector_status_disconnected; bool dret; - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", @@ -735,6 +769,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect ret = connector_status_disconnected; } else ret = connector_status_connected; + + /* multiple connectors on the same encoder with the same ddc line + * This tends to be HDMI and DVI on the same encoder with the + * same ddc line. If the edid says HDMI, consider the HDMI port + * connected and the DVI port disconnected. If the edid doesn't + * say HDMI, vice versa. 
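/*
 * Editor's note: standalone sketch of the shared-DDC rule described in the
 * comment here.  Two ports on one DDC line see the same EDID, so the sink
 * is attributed to exactly one of them.  The types and the edid_is_hdmi
 * flag are illustrative; in the driver the check is
 * drm_detect_hdmi_monitor() on the probed EDID.
 */
#include <stdbool.h>

enum port_kind { PORT_DVI_D, PORT_HDMI };

static bool port_owns_sink(enum port_kind port, bool edid_is_hdmi)
{
	if (edid_is_hdmi)
		return port == PORT_HDMI;	/* HDMI port reports connected */
	return port == PORT_DVI_D;		/* plain DVI sink stays on DVI */
}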
+ */ + if (radeon_connector->shared_ddc && connector_status_connected) { + struct drm_device *dev = connector->dev; + struct drm_connector *list_connector; + struct radeon_connector *list_radeon_connector; + list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { + if (connector == list_connector) + continue; + list_radeon_connector = to_radeon_connector(list_connector); + if (radeon_connector->devices == list_radeon_connector->devices) { + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; + ret = connector_status_disconnected; + } + } else { + if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || + (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; + ret = connector_status_disconnected; + } + } + } + } + } } } @@ -833,10 +900,18 @@ static void radeon_dvi_force(struct drm_connector *connector) static int radeon_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* XXX check mode bandwidth */ + /* clocks over 135 MHz have heat issues with DVI on RV100 */ + if (radeon_connector->use_digital && + (rdev->family == CHIP_RV100) && + (mode->clock > 135000)) + return MODE_CLOCK_HIGH; + if (radeon_connector->use_digital && (mode->clock > 165000)) { if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || @@ -863,6 +938,93 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = { .force = radeon_dvi_force, }; +static void radeon_dp_connector_destroy(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + + if (radeon_connector->ddc_bus) + radeon_i2c_destroy(radeon_connector->ddc_bus); + if (radeon_connector->edid) + kfree(radeon_connector->edid); + if (radeon_dig_connector->dp_i2c_bus) + radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); + kfree(radeon_connector->con_priv); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static int radeon_dp_get_modes(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int ret; + + ret = radeon_ddc_get_modes(radeon_connector); + return ret; +} + +static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + enum drm_connector_status ret = connector_status_disconnected; + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + u8 sink_type; + + if (radeon_connector->edid) { + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; + } + + sink_type = radeon_dp_getsinktype(radeon_connector); + if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (sink_type == CONNECTOR_OBJECT_ID_eDP)) { + if (radeon_dp_getdpcd(radeon_connector)) { + radeon_dig_connector->dp_sink_type = sink_type; + ret = connector_status_connected; + } + } else { + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + if (radeon_ddc_probe(radeon_connector)) 
{ + radeon_dig_connector->dp_sink_type = sink_type; + ret = connector_status_connected; + } + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); + } + + return ret; +} + +static int radeon_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; + + /* XXX check mode bandwidth */ + + if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + return radeon_dp_mode_valid_helper(radeon_connector, mode); + else + return MODE_OK; +} + +struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { + .get_modes = radeon_dp_get_modes, + .mode_valid = radeon_dp_mode_valid, + .best_encoder = radeon_dvi_encoder, +}; + +struct drm_connector_funcs radeon_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = radeon_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = radeon_connector_set_property, + .destroy = radeon_dp_connector_destroy, + .force = radeon_dvi_force, +}; + void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, @@ -871,7 +1033,8 @@ radeon_add_atom_connector(struct drm_device *dev, struct radeon_i2c_bus_rec *i2c_bus, bool linkb, uint32_t igp_lane_info, - uint16_t connector_object_id) + uint16_t connector_object_id, + struct radeon_hpd *hpd) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; @@ -911,6 +1074,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_connector->devices = supported_device; radeon_connector->shared_ddc = shared_ddc; radeon_connector->connector_object_id = connector_object_id; + radeon_connector->hpd = *hpd; switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -963,10 +1127,12 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - radeon_connector->dac_load_detect = true; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); + if (connector_type == DRM_MODE_CONNECTOR_DVII) { + radeon_connector->dac_load_detect = true; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); + } break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: @@ -991,22 +1157,36 @@ radeon_add_atom_connector(struct drm_device *dev, subpixel_order = SubPixelHorizontalRGB; break; case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; - drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); - ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); + drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); + ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (ret) goto failed; if (i2c_bus->valid) { - radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); + /* 
add DP i2c bus */ + if (connector_type == DRM_MODE_CONNECTOR_eDP) + radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); + else + radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); + if (!radeon_dig_connector->dp_i2c_bus) + goto failed; + if (connector_type == DRM_MODE_CONNECTOR_eDP) + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP"); + else + radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); if (!radeon_connector->ddc_bus) goto failed; } subpixel_order = SubPixelHorizontalRGB; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.coherent_mode_property, + 1); break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1020,6 +1200,9 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.tv_std_property, + radeon_atombios_get_tv_info(rdev)); } break; case DRM_MODE_CONNECTOR_LVDS: @@ -1038,7 +1221,6 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } - drm_mode_create_scaling_mode_property(dev); drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); @@ -1063,7 +1245,8 @@ radeon_add_legacy_connector(struct drm_device *dev, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, - uint16_t connector_object_id) + uint16_t connector_object_id, + struct radeon_hpd *hpd) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; @@ -1093,6 +1276,7 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->connector_id = connector_id; radeon_connector->devices = supported_device; radeon_connector->connector_object_id = connector_object_id; + radeon_connector->hpd = *hpd; switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1160,6 +1344,9 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.tv_std_property, + radeon_combios_get_tv_info(rdev)); } break; case DRM_MODE_CONNECTOR_LVDS: diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 4f7afc79dd8..06123ba31d3 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) for (t = 0; t < dev_priv->usec_timeout; t++) { u32 done_age = GET_SCRATCH(dev_priv, 1); DRM_DEBUG("done_age = %d\n", done_age); - for (i = start; i < dma->buf_count; i++) { - buf = dma->buflist[i]; + for (i = 0; i < dma->buf_count; i++) { + buf = dma->buflist[start]; buf_priv = buf->dev_private; if (buf->file_priv == NULL || (buf->pending && buf_priv->age <= @@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) buf->pending = 0; return buf; } - start = 0; + if (++start >= dma->buf_count) + start = 0; } if (t) { @@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) } } - DRM_DEBUG("returning NULL!\n"); return NULL; } -#if 0 -struct drm_buf *radeon_freelist_get(struct drm_device * dev) -{ - struct drm_device_dma *dma = dev->dma; - 
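/*
 * Editor's note: standalone sketch of the round-robin scan used by the
 * radeon_freelist_get() fix above: visit every slot exactly once per pass,
 * starting from the saved cursor and wrapping, rather than only scanning
 * from the cursor to the end of the list.  The buf type and is_free()
 * helper are hypothetical.
 */
#include <stddef.h>
#include <stdbool.h>

struct buf;
extern bool is_free(const struct buf *b);	/* hypothetical */

static struct buf *freelist_get(struct buf **list, size_t count, size_t *cursor)
{
	size_t i;

	for (i = 0; i < count; i++) {
		struct buf *b = list[*cursor];

		if (is_free(b))
			return b;
		/* wrap inside the same pass so every slot gets a look */
		if (++(*cursor) >= count)
			*cursor = 0;
	}
	return NULL;	/* nothing free this pass */
}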
drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv; - struct drm_buf *buf; - int i, t; - int start; - u32 done_age; - - done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1)); - if (++dev_priv->last_buf >= dma->buf_count) - dev_priv->last_buf = 0; - - start = dev_priv->last_buf; - dev_priv->stats.freelist_loops++; - - for (t = 0; t < 2; t++) { - for (i = start; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if (buf->file_priv == 0 || (buf->pending && - buf_priv->age <= - done_age)) { - dev_priv->stats.requested_bufs++; - buf->pending = 0; - return buf; - } - } - start = 0; - } - - return NULL; -} -#endif - void radeon_freelist_reset(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; @@ -2182,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); + kfree(master_priv); return ret; } master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 5ab2cf96a26..1190148cf5e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) } p->relocs_ptr[i] = &p->relocs[i]; p->relocs[i].robj = p->relocs[i].gobj->driver_private; - p->relocs[i].lobj.robj = p->relocs[i].robj; + p->relocs[i].lobj.bo = p->relocs[i].robj; p->relocs[i].lobj.rdomain = r->read_domains; p->relocs[i].lobj.wdomain = r->write_domain; p->relocs[i].handle = r->handle; p->relocs[i].flags = r->flags; INIT_LIST_HEAD(&p->relocs[i].lobj.list); - radeon_object_list_add_object(&p->relocs[i].lobj, - &p->validated); + radeon_bo_list_add_object(&p->relocs[i].lobj, + &p->validated); } } - return radeon_object_list_validate(&p->validated, p->ib->fence); + return radeon_bo_list_validate(&p->validated, p->ib->fence); } int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) @@ -189,10 +189,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (error) { - radeon_object_list_unvalidate(&parser->validated); + if (error && parser->ib) { + radeon_bo_list_unvalidate(&parser->validated, + parser->ib->fence); } else { - radeon_object_list_clean(&parser->validated); + radeon_bo_list_unreserve(&parser->validated); } for (i = 0; i < parser->nrelocs; i++) { if (parser->relocs[i].gobj) { @@ -230,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.rdev = rdev; + parser.dev = rdev->dev; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 41bb76fbe73..768b1509fa0 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev) if (rdev->family < CHIP_R600) { int i; - for (i = 0; i < 8; i++) { - WREG32(RADEON_SURFACE0_INFO + - i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), - 0); + for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { + if (rdev->surface_regs[i].bo) + radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); + else + radeon_clear_surface_reg(rdev, i); } /* enable surfaces */ WREG32(RADEON_SURFACE_CNTL, 0); @@ -208,6 +209,24 @@ bool 
radeon_card_posted(struct radeon_device *rdev) } +bool radeon_boot_test_post_card(struct radeon_device *rdev) +{ + if (radeon_card_posted(rdev)) + return true; + + if (rdev->bios) { + DRM_INFO("GPU not posted. posting now...\n"); + if (rdev->is_atom_bios) + atom_asic_init(rdev->mode_info.atom_context); + else + radeon_combios_asic_init(rdev->ddev); + return true; + } else { + dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); + return false; + } +} + int radeon_dummy_page_init(struct radeon_device *rdev) { rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); @@ -372,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev) /* FIXME: not supported yet */ return -EINVAL; } + + if (rdev->flags & RADEON_IS_IGP) { + rdev->asic->get_memory_clock = NULL; + rdev->asic->set_memory_clock = NULL; + } + return 0; } @@ -462,13 +487,18 @@ int radeon_atombios_init(struct radeon_device *rdev) atom_card_info->pll_write = cail_pll_write; rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); + mutex_init(&rdev->mode_info.atom_context->mutex); radeon_atom_initialize_bios_scratch_regs(rdev->ddev); + atom_allocate_fb_scratch(rdev->mode_info.atom_context); return 0; } void radeon_atombios_fini(struct radeon_device *rdev) { - kfree(rdev->mode_info.atom_context); + if (rdev->mode_info.atom_context) { + kfree(rdev->mode_info.atom_context->scratch); + kfree(rdev->mode_info.atom_context); + } kfree(rdev->mode_info.atom_card_info); } @@ -514,11 +544,75 @@ void radeon_agp_disable(struct radeon_device *rdev) rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; rdev->asic->gart_set_page = &r100_pci_gart_set_page; } + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; +} + +void radeon_check_arguments(struct radeon_device *rdev) +{ + /* vramlimit must be a power of two */ + switch (radeon_vram_limit) { + case 0: + case 4: + case 8: + case 16: + case 32: + case 64: + case 128: + case 256: + case 512: + case 1024: + case 2048: + case 4096: + break; + default: + dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", + radeon_vram_limit); + radeon_vram_limit = 0; + break; + } + radeon_vram_limit = radeon_vram_limit << 20; + /* gtt size must be power of two and greater or equal to 32M */ + switch (radeon_gart_size) { + case 4: + case 8: + case 16: + dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", + radeon_gart_size); + radeon_gart_size = 512; + break; + case 32: + case 64: + case 128: + case 256: + case 512: + case 1024: + case 2048: + case 4096: + break; + default: + dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", + radeon_gart_size); + radeon_gart_size = 512; + break; + } + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + /* AGP mode can only be -1, 1, 2, 4, 8 */ + switch (radeon_agpmode) { + case -1: + case 0: + case 1: + case 2: + case 4: + case 8: + break; + default: + dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " + "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); + radeon_agpmode = 0; + break; + } } -/* - * Radeon device. 
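/*
 * Editor's note: the switch statements in radeon_check_arguments() above
 * are whitelists of power-of-two sizes.  A compact equivalent check as a
 * standalone sketch; the function and parameter names are illustrative,
 * not driver API.
 */
#include <stdbool.h>

static bool is_pow2(unsigned int v)
{
	return v != 0 && (v & (v - 1)) == 0;
}

/* e.g. gart size: must be a power of two and at least 32 MB */
static unsigned int sanitize_size_mb(unsigned int requested_mb,
				     unsigned int min_mb,
				     unsigned int fallback_mb)
{
	if (!is_pow2(requested_mb) || requested_mb < min_mb)
		return fallback_mb;
	return requested_mb;
}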
- */ int radeon_device_init(struct radeon_device *rdev, struct drm_device *ddev, struct pci_dev *pdev, @@ -544,16 +638,24 @@ int radeon_device_init(struct radeon_device *rdev, mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ib_pool.mutex); mutex_init(&rdev->cp.mutex); + if (rdev->family >= CHIP_R600) + spin_lock_init(&rdev->ih.lock); + mutex_init(&rdev->gem.mutex); rwlock_init(&rdev->fence_drv.lock); INIT_LIST_HEAD(&rdev->gem.objects); + /* setup workqueue */ + rdev->wq = create_workqueue("radeon"); + if (rdev->wq == NULL) + return -ENOMEM; + /* Set asic functions */ r = radeon_asic_init(rdev); - if (r) { + if (r) return r; - } + radeon_check_arguments(rdev); - if (radeon_agpmode == -1) { + if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { radeon_agp_disable(rdev); } @@ -620,6 +722,7 @@ void radeon_device_fini(struct radeon_device *rdev) DRM_INFO("radeon: finishing device.\n"); rdev->shutdown = true; radeon_fini(rdev); + destroy_workqueue(rdev->wq); vga_client_register(rdev->pdev, NULL, NULL, NULL); iounmap(rdev->rmmio); rdev->rmmio = NULL; @@ -631,38 +734,46 @@ void radeon_device_fini(struct radeon_device *rdev) */ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) { - struct radeon_device *rdev = dev->dev_private; + struct radeon_device *rdev; struct drm_crtc *crtc; + int r; - if (dev == NULL || rdev == NULL) { + if (dev == NULL || dev->dev_private == NULL) { return -ENODEV; } if (state.event == PM_EVENT_PRETHAW) { return 0; } + rdev = dev->dev_private; + /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); - struct radeon_object *robj; + struct radeon_bo *robj; if (rfb == NULL || rfb->obj == NULL) { continue; } robj = rfb->obj->driver_private; - if (robj != rdev->fbdev_robj) { - radeon_object_unpin(robj); + if (robj != rdev->fbdev_rbo) { + r = radeon_bo_reserve(robj, false); + if (unlikely(r == 0)) { + radeon_bo_unpin(robj); + radeon_bo_unreserve(robj); + } } } /* evict vram memory */ - radeon_object_evict_vram(rdev); + radeon_bo_evict_vram(rdev); /* wait for gpu to finish processing current batch */ radeon_fence_wait_last(rdev); radeon_save_bios_scratch_regs(rdev); radeon_suspend(rdev); + radeon_hpd_fini(rdev); /* evict remaining vram memory */ - radeon_object_evict_vram(rdev); + radeon_bo_evict_vram(rdev); pci_save_state(dev->pdev); if (state.event == PM_EVENT_SUSPEND) { @@ -695,6 +806,8 @@ int radeon_resume_kms(struct drm_device *dev) fb_set_suspend(rdev->fbdev_info, 0); release_console_sem(); + /* reset hpd state */ + radeon_hpd_init(rdev); /* blat the mode back in */ drm_helper_resume_force_mode(dev); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c85df4afcb7..6a92f994cc2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -234,7 +234,7 @@ static const char *encoder_names[34] = { "INTERNAL_UNIPHY2", }; -static const char *connector_names[13] = { +static const char *connector_names[15] = { "Unknown", "VGA", "DVI-I", @@ -248,6 +248,18 @@ static const char *connector_names[13] = { "DisplayPort", "HDMI-A", "HDMI-B", + "TV", + "eDP", +}; + +static const char *hpd_names[7] = { + "NONE", + "HPD1", + "HPD2", + "HPD3", + "HPD4", + "HPD5", + "HPD6", }; static void radeon_print_display_setup(struct drm_device *dev) @@ -264,16 +276,18 @@ static void radeon_print_display_setup(struct drm_device *dev) radeon_connector = to_radeon_connector(connector); 
DRM_INFO("Connector %d:\n", i); DRM_INFO(" %s\n", connector_names[connector->connector_type]); + if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) + DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); if (radeon_connector->ddc_bus) DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", radeon_connector->ddc_bus->rec.mask_clk_reg, radeon_connector->ddc_bus->rec.mask_data_reg, radeon_connector->ddc_bus->rec.a_clk_reg, radeon_connector->ddc_bus->rec.a_data_reg, - radeon_connector->ddc_bus->rec.put_clk_reg, - radeon_connector->ddc_bus->rec.put_data_reg, - radeon_connector->ddc_bus->rec.get_clk_reg, - radeon_connector->ddc_bus->rec.get_data_reg); + radeon_connector->ddc_bus->rec.en_clk_reg, + radeon_connector->ddc_bus->rec.en_data_reg, + radeon_connector->ddc_bus->rec.y_clk_reg, + radeon_connector->ddc_bus->rec.y_data_reg); DRM_INFO(" Encoders:\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); @@ -317,13 +331,17 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) ret = radeon_get_atom_connector_info_from_object_table(dev); else ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); - } else + } else { ret = radeon_get_legacy_connector_info_from_bios(dev); + if (ret == false) + ret = radeon_get_legacy_connector_info_from_table(dev); + } } else { if (!ASIC_IS_AVIVO(rdev)) ret = radeon_get_legacy_connector_info_from_table(dev); } if (ret) { + radeon_setup_encoder_clones(dev); radeon_print_display_setup(dev); list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) radeon_ddc_dump(drm_connector); @@ -336,12 +354,19 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) { int ret = 0; + if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || + dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); + } if (!radeon_connector->ddc_bus) return -1; if (!radeon_connector->edid) { - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); } if (radeon_connector->edid) { @@ -361,9 +386,9 @@ static int radeon_ddc_dump(struct drm_connector *connector) if (!radeon_connector->ddc_bus) return -1; - radeon_i2c_do_lock(radeon_connector, 1); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector, 0); + radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (edid) { kfree(edid); } @@ -386,11 +411,12 @@ void radeon_compute_pll(struct radeon_pll *pll, uint32_t *fb_div_p, uint32_t *frac_fb_div_p, uint32_t *ref_div_p, - uint32_t *post_div_p, - int flags) + uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; + uint32_t min_post_div = pll->min_post_div; + uint32_t max_post_div = pll->max_post_div; uint32_t min_fractional_feed_div = 0; uint32_t max_fractional_feed_div = 0; uint32_t best_vco = pll->best_vco; @@ -406,7 +432,7 @@ void radeon_compute_pll(struct radeon_pll *pll, 
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; - if (flags & RADEON_PLL_USE_REF_DIV) + if (pll->flags & RADEON_PLL_USE_REF_DIV) min_ref_div = max_ref_div = pll->reference_div; else { while (min_ref_div < max_ref_div-1) { @@ -421,19 +447,22 @@ void radeon_compute_pll(struct radeon_pll *pll, } } - if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { + if (pll->flags & RADEON_PLL_USE_POST_DIV) + min_post_div = max_post_div = pll->post_div; + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { min_fractional_feed_div = pll->min_frac_feedback_div; max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { + for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { uint32_t ref_div; - if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) + if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) continue; /* legacy radeons only have a few post_divs */ - if (flags & RADEON_PLL_LEGACY) { + if (pll->flags & RADEON_PLL_LEGACY) { if ((post_div == 5) || (post_div == 7) || (post_div == 9) || @@ -480,7 +509,7 @@ void radeon_compute_pll(struct radeon_pll *pll, tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; current_freq = radeon_div(tmp, ref_div * post_div); - if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { + if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { error = freq - current_freq; error = error < 0 ? 0xffffffff : error; } else @@ -507,12 +536,12 @@ void radeon_compute_pll(struct radeon_pll *pll, best_freq = current_freq; best_error = error; best_vco_diff = vco_diff; - } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || - ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || - ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || - ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || - ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || - ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { + } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || + ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || + ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || + ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { best_post_div = post_div; best_ref_div = ref_div; best_feedback_div = feedback_div; @@ -542,6 +571,97 @@ void radeon_compute_pll(struct radeon_pll *pll, *post_div_p = best_post_div; } +void radeon_compute_pll_avivo(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) +{ + fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; + fixed20_12 pll_out_max, pll_out_min; + fixed20_12 pll_in_max, pll_in_min; + fixed20_12 reference_freq; + fixed20_12 error, ffreq, a, b; + + pll_out_max.full = rfixed_const(pll->pll_out_max); + pll_out_min.full = rfixed_const(pll->pll_out_min); + pll_in_max.full = rfixed_const(pll->pll_in_max); + pll_in_min.full = rfixed_const(pll->pll_in_min); + reference_freq.full = rfixed_const(pll->reference_freq); + do_div(freq, 10); + ffreq.full = rfixed_const(freq); + 
error.full = rfixed_const(100 * 100); + + /* max p */ + p.full = rfixed_div(pll_out_max, ffreq); + p.full = rfixed_floor(p); + + /* min m */ + m.full = rfixed_div(reference_freq, pll_in_max); + m.full = rfixed_ceil(m); + + while (1) { + n.full = rfixed_div(ffreq, reference_freq); + n.full = rfixed_mul(n, m); + n.full = rfixed_mul(n, p); + + f_vco.full = rfixed_div(n, m); + f_vco.full = rfixed_mul(f_vco, reference_freq); + + f_pclk.full = rfixed_div(f_vco, p); + + if (f_pclk.full > ffreq.full) + error.full = f_pclk.full - ffreq.full; + else + error.full = ffreq.full - f_pclk.full; + error.full = rfixed_div(error, f_pclk); + a.full = rfixed_const(100 * 100); + error.full = rfixed_mul(error, a); + + a.full = rfixed_mul(m, p); + a.full = rfixed_div(n, a); + best_freq.full = rfixed_mul(reference_freq, a); + + if (rfixed_trunc(error) < 25) + break; + + a.full = rfixed_const(1); + m.full = m.full + a.full; + a.full = rfixed_div(reference_freq, m); + if (a.full >= pll_in_min.full) + continue; + + m.full = rfixed_div(reference_freq, pll_in_max); + m.full = rfixed_ceil(m); + a.full= rfixed_const(1); + p.full = p.full - a.full; + a.full = rfixed_mul(p, ffreq); + if (a.full >= pll_out_min.full) + continue; + else { + DRM_ERROR("Unable to find pll dividers\n"); + break; + } + } + + a.full = rfixed_const(10); + b.full = rfixed_mul(n, a); + + frac_n.full = rfixed_floor(n); + frac_n.full = rfixed_mul(frac_n, a); + frac_n.full = b.full - frac_n.full; + + *dot_clock_p = rfixed_trunc(best_freq); + *fb_div_p = rfixed_trunc(n); + *frac_fb_div_p = rfixed_trunc(frac_n); + *ref_div_p = rfixed_trunc(m); + *post_div_p = rfixed_trunc(p); + + DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); +} + static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); @@ -551,7 +671,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) radeonfb_remove(dev, fb); if (radeon_fb->obj) { - radeon_gem_object_unpin(radeon_fb->obj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(radeon_fb->obj); mutex_unlock(&dev->struct_mutex); @@ -599,7 +718,11 @@ radeon_user_framebuffer_create(struct drm_device *dev, struct drm_gem_object *obj; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); - + if (obj == NULL) { + dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " + "can't create framebuffer\n", mode_cmd->handle); + return NULL; + } return radeon_framebuffer_create(dev, mode_cmd, obj); } @@ -629,7 +752,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] = { TV_STD_SECAM, "secam" }, }; -int radeon_modeset_create_props(struct radeon_device *rdev) +static int radeon_modeset_create_props(struct radeon_device *rdev) { int i, sz; @@ -642,7 +765,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev) return -ENOMEM; rdev->mode_info.coherent_mode_property->values[0] = 0; - rdev->mode_info.coherent_mode_property->values[0] = 1; + rdev->mode_info.coherent_mode_property->values[1] = 1; } if (!ASIC_IS_AVIVO(rdev)) { @@ -666,7 +789,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev) if (!rdev->mode_info.load_detect_property) return -ENOMEM; rdev->mode_info.load_detect_property->values[0] = 0; - rdev->mode_info.load_detect_property->values[0] = 1; + rdev->mode_info.load_detect_property->values[1] = 1; drm_mode_create_scaling_mode_property(rdev->ddev); @@ -723,6 +846,8 @@ int radeon_modeset_init(struct radeon_device *rdev) if 
(!ret) { return ret; } + /* initialize hpd */ + radeon_hpd_init(rdev); drm_helper_initial_config(rdev->ddev); return 0; } @@ -730,6 +855,7 @@ int radeon_modeset_init(struct radeon_device *rdev) void radeon_modeset_fini(struct radeon_device *rdev) { if (rdev->mode_info.mode_config_initialized) { + radeon_hpd_fini(rdev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } @@ -750,9 +876,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; if (first) { - radeon_crtc->rmx_type = radeon_encoder->rmx_type; + /* set scaling */ + if (radeon_encoder->rmx_type == RMX_OFF) + radeon_crtc->rmx_type = RMX_OFF; + else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || + mode->vdisplay < radeon_encoder->native_mode.vdisplay) + radeon_crtc->rmx_type = radeon_encoder->rmx_type; + else + radeon_crtc->rmx_type = RMX_OFF; + /* copy native mode */ memcpy(&radeon_crtc->native_mode, - &radeon_encoder->native_mode, + &radeon_encoder->native_mode, sizeof(struct drm_display_mode)); first = false; } else { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 7f50fb864af..8ba3de7994d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -86,6 +86,8 @@ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; +int radeon_new_pll = 1; +int radeon_audio = 1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -120,6 +122,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); module_param_named(tv, radeon_tv, int, 0444); +MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); +module_param_named(new_pll, radeon_new_pll, int, 0444); + +MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); +module_param_named(audio, radeon_audio, int, 0444); + static int radeon_suspend(struct drm_device *dev, pm_message_t state) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -188,7 +196,7 @@ static struct drm_driver driver_old = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .ioctl = drm_ioctl, + .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, @@ -276,7 +284,7 @@ static struct drm_driver kms_driver = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .ioctl = drm_ioctl, + .unlocked_ioctl = drm_ioctl, .mmap = radeon_mmap, .poll = drm_poll, .fasync = drm_fasync, diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 350962e0f34..e13785282a8 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); # define R600_IT_WAIT_REG_MEM 0x00003C00 # define R600_IT_MEM_WRITE 0x00003D00 # define R600_IT_INDIRECT_BUFFER 0x00003200 -# define R600_IT_CP_INTERRUPT 0x00004000 # define R600_IT_SURFACE_SYNC 0x00004300 # define R600_CB0_DEST_BASE_ENA (1 << 6) # define R600_TC_ACTION_ENA (1 << 23) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index d42bc512d75..3c91724457c 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -35,6 +35,51 @@ extern int atom_debug; bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct 
drm_display_mode *mode); +static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *clone_encoder; + uint32_t index_mask = 0; + int count; + + /* DIG routing gets problematic */ + if (rdev->family >= CHIP_R600) + return index_mask; + /* LVDS/TV are too wacky */ + if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return index_mask; + /* DVO requires 2x ppll clocks depending on tmds chip */ + if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) + return index_mask; + + count = -1; + list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); + count++; + + if (clone_encoder == encoder) + continue; + if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT)) + continue; + if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT) + continue; + else + index_mask |= (1 << count); + } + return index_mask; +} + +void radeon_setup_encoder_clones(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + encoder->possible_clones = radeon_encoder_clones(encoder); + } +} + uint32_t radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) { @@ -111,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t return ret; } +static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + return true; + default: + return false; + } +} void radeon_link_encoder_connector(struct drm_device *dev) { @@ -157,35 +222,12 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); - if (radeon_encoder->devices & radeon_connector->devices) + if (radeon_encoder->active_device & radeon_connector->devices) return connector; } return NULL; } -/* used for both atom and legacy */ -void radeon_rmx_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct drm_display_mode *native_mode = &radeon_encoder->native_mode; - - if (mode->hdisplay < native_mode->hdisplay || - mode->vdisplay < native_mode->vdisplay) { - int mode_id = adjusted_mode->base.id; - *adjusted_mode = *native_mode; - if (!ASIC_IS_AVIVO(rdev)) { - adjusted_mode->hdisplay = mode->hdisplay; - adjusted_mode->vdisplay = mode->vdisplay; - } - adjusted_mode->base.id = mode_id; - } -} - - static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode 
*adjusted_mode) @@ -198,14 +240,26 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); - if (radeon_encoder->rmx_type != RMX_OFF) - radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); - /* hw bug */ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + /* get the native mode for LVDS */ + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { + struct drm_display_mode *native_mode = &radeon_encoder->native_mode; + int mode_id = adjusted_mode->base.id; + *adjusted_mode = *native_mode; + if (!ASIC_IS_AVIVO(rdev)) { + adjusted_mode->hdisplay = mode->hdisplay; + adjusted_mode->vdisplay = mode->vdisplay; + adjusted_mode->crtc_hdisplay = mode->hdisplay; + adjusted_mode->crtc_vdisplay = mode->vdisplay; + } + adjusted_mode->base.id = mode_id; + } + + /* get the native mode for TV */ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; if (tv_dac) { @@ -218,6 +272,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, } } + if (ASIC_IS_DCE3(rdev) && + (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + radeon_dp_set_link_config(connector, mode); + } + return true; } @@ -392,7 +452,7 @@ union lvds_encoder_control { LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; }; -static void +void atombios_digital_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; @@ -400,6 +460,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); union lvds_encoder_control args; int index = 0; + int hdmi_detected = 0; uint8_t frev, crev; struct radeon_encoder_atom_dig *dig; struct drm_connector *connector; @@ -420,6 +481,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) if (!radeon_connector->con_priv) return; + if (drm_detect_hdmi_monitor(radeon_connector->edid)) + hdmi_detected = 1; + dig_connector = radeon_connector->con_priv; memset(&args, 0, sizeof(args)); @@ -449,13 +513,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) case 1: args.v1.ucMisc = 0; args.v1.ucAction = action; - if (drm_detect_hdmi_monitor(radeon_connector->edid)) + if (hdmi_detected) args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & (1 << 0)) + if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & (1 << 1)) + if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) args.v1.ucMisc |= (1 << 1); } else { if (dig_connector->linkb) @@ -474,7 +538,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) if (dig->coherent_mode) args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; } - if (drm_detect_hdmi_monitor(radeon_connector->edid)) + if (hdmi_detected) args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v2.ucTruncate = 0; @@ -482,18 +546,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v2.ucTemporal = 0; args.v2.ucFRC = 0; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & (1 << 0)) + 
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & (1 << 5)) { + if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; - if (dig->lvds_misc & (1 << 1)) + if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; } - if (dig->lvds_misc & (1 << 6)) { + if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; - if (dig->lvds_misc & (1 << 1)) + if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; - if (((dig->lvds_misc >> 2) & 0x3) == 2) + if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; } } else { @@ -514,7 +578,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - + r600_hdmi_enable(encoder, hdmi_detected); } int @@ -522,6 +586,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) { struct drm_connector *connector; struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *radeon_dig_connector; connector = radeon_get_connector_for_encoder(encoder); if (!connector) @@ -551,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) return ATOM_ENCODER_MODE_LVDS; break; case DRM_MODE_CONNECTOR_DisplayPort: - /*if (radeon_output->MonType == MT_DP) - return ATOM_ENCODER_MODE_DP; - else*/ - if (drm_detect_hdmi_monitor(radeon_connector->edid)) + case DRM_MODE_CONNECTOR_eDP: + radeon_dig_connector = radeon_connector->con_priv; + if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + return ATOM_ENCODER_MODE_DP; + else if (drm_detect_hdmi_monitor(radeon_connector->edid)) return ATOM_ENCODER_MODE_HDMI; else return ATOM_ENCODER_MODE_DVI; break; - case CONNECTOR_DVI_A: - case CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_DVIA: + case DRM_MODE_CONNECTOR_VGA: return ATOM_ENCODER_MODE_CRT; break; - case CONNECTOR_STV: - case CONNECTOR_CTV: - case CONNECTOR_DIN: + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_9PinDIN: /* fix me */ return ATOM_ENCODER_MODE_TV; /*return ATOM_ENCODER_MODE_CV;*/ @@ -573,6 +640,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) } } +/* + * DIG Encoder/Transmitter Setup + * + * DCE 3.0/3.1 + * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. + * Supports up to 3 digital outputs + * - 2 DIG encoder blocks. + * DIG1 can drive UNIPHY link A or link B + * DIG2 can drive UNIPHY link B or LVTMA + * + * DCE 3.2 + * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). + * Supports up to 5 digital outputs + * - 2 DIG encoder blocks. 
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B + * + * Routing + * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) + * Examples: + * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI + * crtc1 -> dig1 -> UNIPHY0 link B -> DP + * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS + * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI + */ static void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) { @@ -605,24 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) memset(&args, 0, sizeof(args)); - if (ASIC_IS_DCE32(rdev)) { - if (dig->dig_block) - index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); - else - index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); - num = dig->dig_block + 1; - } else { - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); - num = 1; - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); - num = 2; - break; - } - } + if (dig->dig_encoder) + index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); + else + index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); + num = dig->dig_encoder + 1; atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); @@ -652,18 +730,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) } } - if (radeon_encoder->pixel_clock > 165000) { - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; + args.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) { + if (dig_connector->dp_clock == 270000) + args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + args.ucLaneNum = dig_connector->dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) args.ucLaneNum = 8; - } else { - if (dig_connector->linkb) - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; - else - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; + else args.ucLaneNum = 4; - } - args.ucEncoderMode = atombios_get_encoder_mode(encoder); + if (dig_connector->linkb) + args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; + else + args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -674,8 +755,8 @@ union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; }; -static void -atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) +void +atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -687,6 +768,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; + bool is_dp = false; connector = radeon_get_connector_for_encoder(encoder); if (!connector) @@ -704,6 +786,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) dig_connector = radeon_connector->con_priv; + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) + is_dp = true; + memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE32(rdev)) @@ -724,17 +809,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { args.v1.usInitInfo = radeon_connector->connector_object_id; + } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { + 
args.v1.asMode.ucLaneSel = lane_num; + args.v1.asMode.ucLaneSet = lane_set; } else { - if (radeon_encoder->pixel_clock > 165000) + if (is_dp) + args.v1.usPixelClock = + cpu_to_le16(dig_connector->dp_clock / 10); + else if (radeon_encoder->pixel_clock > 165000) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } if (ASIC_IS_DCE32(rdev)) { - if (radeon_encoder->pixel_clock > 165000) - args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); - if (dig->dig_block) + if (dig->dig_encoder == 1) args.v2.acConfig.ucEncoderSel = 1; + if (dig_connector->linkb) + args.v2.acConfig.ucLinkSel = 1; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: @@ -751,26 +842,30 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) break; } - if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (is_dp) + args.v2.acConfig.fCoherentMode = 1; + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v2.acConfig.fCoherentMode = 1; } } else { + args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; + if (dig->dig_encoder) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; + switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; if (rdev->flags & RADEON_IS_IGP) { if (radeon_encoder->pixel_clock > 165000) { - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B); if (dig_connector->igp_lane_info & 0x3) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; else if (dig_connector->igp_lane_info & 0xc) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; } else { - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; if (dig_connector->igp_lane_info & 0x1) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; else if (dig_connector->igp_lane_info & 0x2) @@ -780,42 +875,27 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) else if (dig_connector->igp_lane_info & 0x8) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; } - } else { - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B | - ATOM_TRANSMITTER_CONFIG_LANE_0_7); - else { - if (dig_connector->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - } - } - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; - if (radeon_encoder->pixel_clock > 165000) - args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | - ATOM_TRANSMITTER_CONFIG_LINKA_B | - ATOM_TRANSMITTER_CONFIG_LANE_0_7); - else { - if (dig_connector->linkb) - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else - args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; } break; } - if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (radeon_encoder->pixel_clock > 165000) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; + + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; + else + args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; + + if (is_dp) + args.v1.ucConfig |= 
ATOM_TRANSMITTER_CONFIG_COHERENT; + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; } } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } static void @@ -918,12 +998,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (is_dig) { switch (mode) { case DRM_MODE_DPMS_ON: - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + dp_link_train(encoder, connector); + } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); break; } } else { @@ -957,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) union crtc_sourc_param args; int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); uint8_t frev, crev; + struct radeon_encoder_atom_dig *dig; memset(&args, 0, sizeof(args)); @@ -1020,20 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (ASIC_IS_DCE32(rdev)) { - if (radeon_crtc->crtc_id) - args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; - else - args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; - } else + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + dig = radeon_encoder->enc_priv; + if (dig->dig_encoder) + args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; + else args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; - break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; @@ -1060,7 +1141,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } static void @@ -1094,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, } } +static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *test_encoder; + struct radeon_encoder_atom_dig *dig; + uint32_t dig_enc_in_use = 0; + /* on DCE32 and encoder can driver any block so just crtc id */ + if (ASIC_IS_DCE32(rdev)) { + return radeon_crtc->crtc_id; + } + + /* on DCE3 - LVTMA can only be driven by DIGB */ + list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_test_encoder; + + if (encoder == test_encoder) + continue; + + if (!radeon_encoder_is_digital(test_encoder)) + continue; + + radeon_test_encoder = to_radeon_encoder(test_encoder); + dig = radeon_test_encoder->enc_priv; + + if (dig->dig_encoder >= 0) + dig_enc_in_use |= (1 << dig->dig_encoder); + } + + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { + if 
(dig_enc_in_use & 0x2) + DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); + return 1; + } + if (!(dig_enc_in_use & 1)) + return 0; + return 1; +} + static void radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, @@ -1104,11 +1225,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - if (radeon_encoder->enc_priv) { - struct radeon_encoder_atom_dig *dig; - - dig = radeon_encoder->enc_priv; - dig->dig_block = radeon_crtc->crtc_id; + if (radeon_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + if (dig) + dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); } radeon_encoder->pixel_clock = adjusted_mode->clock; @@ -1134,14 +1255,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE); /* setup and enable the encoder and transmitter */ atombios_dig_encoder_setup(encoder, ATOM_ENABLE); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); break; case ENCODER_OBJECT_ID_INTERNAL_DDI: atombios_ddia_setup(encoder, ATOM_ENABLE); @@ -1160,6 +1281,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } atombios_apply_encoder_quirks(encoder, adjusted_mode); + + r600_hdmi_setmode(encoder, adjusted_mode); } static bool @@ -1266,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) static void radeon_atom_encoder_disable(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig; radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (radeon_encoder_is_digital(encoder)) { + dig = radeon_encoder->enc_priv; + dig->dig_encoder = -1; + } radeon_encoder->active_device = 0; } @@ -1323,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) /* coherent mode by default */ dig->coherent_mode = true; + dig->dig_encoder = -1; return dig; } @@ -1354,7 +1484,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su encoder->possible_crtcs = 0x1; else encoder->possible_crtcs = 0x3; - encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; @@ -1406,4 +1535,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; } + + r600_hdmi_init(encoder); } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index d10eb43645c..3ba213d1b06 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -140,7 +140,7 @@ int radeonfb_create(struct 
drm_device *dev, struct radeon_framebuffer *rfb; struct drm_mode_fb_cmd mode_cmd; struct drm_gem_object *gobj = NULL; - struct radeon_object *robj = NULL; + struct radeon_bo *rbo = NULL; struct device *device = &rdev->pdev->dev; int size, aligned_size, ret; u64 fb_gpuaddr; @@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev, ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, false, ttm_bo_type_kernel, - false, &gobj); + &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", surface_width, surface_height); ret = -ENOMEM; goto out; } - robj = gobj->driver_private; + rbo = gobj->driver_private; if (fb_tiled) tiling_flags = RADEON_TILING_MACRO; @@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev, } #endif - if (tiling_flags) - radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); + if (tiling_flags) { + ret = radeon_bo_set_tiling_flags(rbo, + tiling_flags | RADEON_TILING_SURFACE, + mode_cmd.pitch); + if (ret) + dev_err(rdev->dev, "FB failed to set tiling flags\n"); + } mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev, ret = -ENOMEM; goto out_unref; } - ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); + ret = radeon_bo_reserve(rbo, false); + if (unlikely(ret != 0)) + goto out_unref; + ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); + if (ret) { + radeon_bo_unreserve(rbo); + goto out_unref; + } + if (fb_tiled) + radeon_bo_check_tiling(rbo, 0, 0); + ret = radeon_bo_kmap(rbo, &fbptr); + radeon_bo_unreserve(rbo); if (ret) { - printk(KERN_ERR "failed to pin framebuffer\n"); - ret = -ENOMEM; goto out_unref; } @@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev, *fb_p = fb; rfb = to_radeon_framebuffer(fb); rdev->fbdev_rfb = rfb; - rdev->fbdev_robj = robj; + rdev->fbdev_rbo = rbo; info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); if (info == NULL) { @@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev, if (ret) goto out_unref; - if (fb_tiled) - radeon_object_check_tiling(robj, 0, 0); - - ret = radeon_object_kmap(robj, &fbptr); - if (ret) { - goto out_unref; - } - - memset_io(fbptr, 0, aligned_size); + memset_io(fbptr, 0xff, aligned_size); strcpy(info->fix.id, "radeondrmfb"); @@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev, return 0; out_unref: - if (robj) { - radeon_object_kunmap(robj); + if (rbo) { + ret = radeon_bo_reserve(rbo, false); + if (likely(ret == 0)) { + radeon_bo_kunmap(rbo); + radeon_bo_unreserve(rbo); + } } if (fb && ret) { list_del(&fb->filp_head); @@ -321,14 +331,22 @@ int radeon_parse_options(char *options) int radeonfb_probe(struct drm_device *dev) { - return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); + struct radeon_device *rdev = dev->dev_private; + int bpp_sel = 32; + + /* select 8 bpp console on RN50 or 16MB cards */ + if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) + bpp_sel = 8; + + return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); } int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) { struct fb_info *info; struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); - struct radeon_object *robj; + struct radeon_bo *rbo; + int r; if (!fb) { return -EINVAL; @@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) info = fb->fbdev; if 
(info) { struct radeon_fb_device *rfbdev = info->par; - robj = rfb->obj->driver_private; + rbo = rfb->obj->driver_private; unregister_framebuffer(info); - radeon_object_kunmap(robj); - radeon_object_unpin(robj); + r = radeon_bo_reserve(rbo, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rbo); + radeon_bo_unpin(rbo); + radeon_bo_unreserve(rbo); + } drm_fb_helper_free(&rfbdev->helper); framebuffer_release(info); } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 3beb26d7471..8495d4e32e1 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence) bool radeon_fence_signaled(struct radeon_fence *fence) { - struct radeon_device *rdev = fence->rdev; unsigned long irq_flags; bool signaled = false; - if (rdev->gpu_lockup) { + if (!fence) return true; - } - if (fence == NULL) { + + if (fence->rdev->gpu_lockup) return true; - } + write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); signaled = fence->signaled; /* if we are shuting down report all fence as signaled */ @@ -168,37 +167,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence) return signaled; } -int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy) -{ - struct radeon_device *rdev; - int ret = 0; - - rdev = fence->rdev; - - __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); - - while (1) { - if (radeon_fence_signaled(fence)) - break; - - if (time_after_eq(jiffies, fence->timeout)) { - ret = -EBUSY; - break; - } - - if (lazy) - schedule_timeout(1); - - if (intr && signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } - } - __set_current_state(TASK_RUNNING); - return ret; -} - - int radeon_fence_wait(struct radeon_fence *fence, bool intr) { struct radeon_device *rdev; @@ -216,13 +184,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) return 0; } - if (rdev->family >= CHIP_R600) { - r = r600_fence_wait(fence, intr, 0); - if (r == -ERESTARTSYS) - return -EBUSY; - return r; - } - retry: cur_jiffies = jiffies; timeout = HZ / 100; @@ -231,14 +192,17 @@ retry: } if (intr) { + radeon_irq_kms_sw_irq_get(rdev); r = wait_event_interruptible_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); - if (unlikely(r == -ERESTARTSYS)) { - return -EBUSY; - } + radeon_irq_kms_sw_irq_put(rdev); + if (unlikely(r < 0)) + return r; } else { + radeon_irq_kms_sw_irq_get(rdev); r = wait_event_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); + radeon_irq_kms_sw_irq_put(rdev); } if (unlikely(!radeon_fence_signaled(fence))) { if (unlikely(r == 0)) { @@ -359,7 +323,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); if (r) { - DRM_ERROR("Fence failed to get a scratch register."); + dev_err(rdev->dev, "fence failed to get scratch register\n"); write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); return r; } @@ -370,9 +334,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev) INIT_LIST_HEAD(&rdev->fence_drv.signaled); rdev->fence_drv.count_timeout = 0; init_waitqueue_head(&rdev->fence_drv.queue); + rdev->fence_drv.initialized = true; write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); if (radeon_debugfs_fence_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for fence !\n"); + dev_err(rdev->dev, "fence debugfs file creation 
failed\n"); } return 0; } @@ -381,11 +346,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) { unsigned long irq_flags; + if (!rdev->fence_drv.initialized) + return; wake_up_all(&rdev->fence_drv.queue); write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg); write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); - DRM_INFO("radeon: fence finalized\n"); + rdev->fence_drv.initialized = false; } diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h index 90187d17384..3d4d84e078a 100644 --- a/drivers/gpu/drm/radeon/radeon_fixed.h +++ b/drivers/gpu/drm/radeon/radeon_fixed.h @@ -38,6 +38,23 @@ typedef union rfixed { #define fixed_init_half(A) { .full = rfixed_const_half((A)) } #define rfixed_trunc(A) ((A).full >> 12) +static inline u32 rfixed_floor(fixed20_12 A) +{ + u32 non_frac = rfixed_trunc(A); + + return rfixed_const(non_frac); +} + +static inline u32 rfixed_ceil(fixed20_12 A) +{ + u32 non_frac = rfixed_trunc(A); + + if (A.full > rfixed_const(non_frac)) + return rfixed_const(non_frac + 1); + else + return rfixed_const(non_frac); +} + static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) { u64 tmp = ((u64)A.full << 13); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a68d7566178..e73d56e83fa 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) int r; if (rdev->gart.table.vram.robj == NULL) { - r = radeon_object_create(rdev, NULL, - rdev->gart.table_size, - true, - RADEON_GEM_DOMAIN_VRAM, - false, &rdev->gart.table.vram.robj); + r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, + true, RADEON_GEM_DOMAIN_VRAM, + &rdev->gart.table.vram.robj); if (r) { return r; } @@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) uint64_t gpu_addr; int r; - r = radeon_object_pin(rdev->gart.table.vram.robj, - RADEON_GEM_DOMAIN_VRAM, &gpu_addr); - if (r) { - radeon_object_unref(&rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (unlikely(r != 0)) return r; - } - r = radeon_object_kmap(rdev->gart.table.vram.robj, - (void **)&rdev->gart.table.vram.ptr); + r = radeon_bo_pin(rdev->gart.table.vram.robj, + RADEON_GEM_DOMAIN_VRAM, &gpu_addr); if (r) { - radeon_object_unpin(rdev->gart.table.vram.robj); - radeon_object_unref(&rdev->gart.table.vram.robj); - DRM_ERROR("radeon: failed to map gart vram table.\n"); + radeon_bo_unreserve(rdev->gart.table.vram.robj); return r; } + r = radeon_bo_kmap(rdev->gart.table.vram.robj, + (void **)&rdev->gart.table.vram.ptr); + if (r) + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); rdev->gart.table_addr = gpu_addr; - return 0; + return r; } void radeon_gart_table_vram_free(struct radeon_device *rdev) { + int r; + if (rdev->gart.table.vram.robj == NULL) { return; } - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); - radeon_object_unref(&rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } + radeon_bo_unref(&rdev->gart.table.vram.robj); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 
d880edf254d..0e1325e1853 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj) void radeon_gem_object_free(struct drm_gem_object *gobj) { - struct radeon_object *robj = gobj->driver_private; + struct radeon_bo *robj = gobj->driver_private; gobj->driver_private = NULL; if (robj) { - radeon_object_unref(&robj); + radeon_bo_unref(&robj); } } int radeon_gem_object_create(struct radeon_device *rdev, int size, - int alignment, int initial_domain, - bool discardable, bool kernel, - bool interruptible, - struct drm_gem_object **obj) + int alignment, int initial_domain, + bool discardable, bool kernel, + struct drm_gem_object **obj) { struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *robj; int r; *obj = NULL; @@ -65,11 +64,11 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, if (alignment < PAGE_SIZE) { alignment = PAGE_SIZE; } - r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, - interruptible, &robj); + r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); if (r) { - DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", - size, initial_domain, alignment); + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", + size, initial_domain, alignment, r); mutex_lock(&rdev->ddev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&rdev->ddev->struct_mutex); @@ -83,33 +82,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, uint64_t *gpu_addr) { - struct radeon_object *robj = obj->driver_private; - uint32_t flags; + struct radeon_bo *robj = obj->driver_private; + int r; - switch (pin_domain) { - case RADEON_GEM_DOMAIN_VRAM: - flags = TTM_PL_FLAG_VRAM; - break; - case RADEON_GEM_DOMAIN_GTT: - flags = TTM_PL_FLAG_TT; - break; - default: - flags = TTM_PL_FLAG_SYSTEM; - break; - } - return radeon_object_pin(robj, flags, gpu_addr); + r = radeon_bo_reserve(robj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(robj, pin_domain, gpu_addr); + radeon_bo_unreserve(robj); + return r; } void radeon_gem_object_unpin(struct drm_gem_object *obj) { - struct radeon_object *robj = obj->driver_private; - radeon_object_unpin(robj); + struct radeon_bo *robj = obj->driver_private; + int r; + + r = radeon_bo_reserve(robj, false); + if (likely(r == 0)) { + radeon_bo_unpin(robj); + radeon_bo_unreserve(robj); + } } int radeon_gem_set_domain(struct drm_gem_object *gobj, uint32_t rdomain, uint32_t wdomain) { - struct radeon_object *robj; + struct radeon_bo *robj; uint32_t domain; int r; @@ -127,7 +126,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = radeon_object_wait(robj); + r = radeon_bo_wait(robj, NULL, false); if (r) { printk(KERN_ERR "Failed to wait for object !\n"); return r; @@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev) void radeon_gem_fini(struct radeon_device *rdev) { - radeon_object_force_delete(rdev); + radeon_bo_force_delete(rdev); } @@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct drm_radeon_gem_info *args = data; args->vram_size = rdev->mc.real_vram_size; - /* FIXME: report somethings that makes sense */ - args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); - args->gart_size = 
rdev->mc.gtt_size; + args->vram_visible = rdev->mc.real_vram_size; + if (rdev->stollen_vga_memory) + args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); + if (rdev->fbdev_rbo) + args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); + args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - + RADEON_IB_POOL_SIZE*64*1024; return 0; } @@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ args->size = roundup(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, args->alignment, - args->initial_domain, false, - false, true, &gobj); + args->initial_domain, false, + false, &gobj); if (r) { return r; } @@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, * just validate the BO into a certain domain */ struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *robj; int r; /* for now if someone requests domain CPU - @@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, { struct drm_radeon_gem_mmap *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; - int r; + struct radeon_bo *robj; gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { return -EINVAL; } robj = gobj->driver_private; - r = radeon_object_mmap(robj, &args->addr_ptr); + args->addr_ptr = radeon_bo_mmap_offset(robj); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); - return r; + return 0; } int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, @@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, { struct drm_radeon_gem_busy *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *robj; int r; - uint32_t cur_placement; + uint32_t cur_placement = 0; gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { return -EINVAL; } robj = gobj->driver_private; - r = radeon_object_busy_domain(robj, &cur_placement); + r = radeon_bo_wait(robj, &cur_placement, true); switch (cur_placement) { case TTM_PL_VRAM: args->domain = RADEON_GEM_DOMAIN_VRAM; @@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, { struct drm_radeon_gem_wait_idle *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *robj; int r; gobj = drm_gem_object_lookup(dev, filp, args->handle); @@ -305,7 +307,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -EINVAL; } robj = gobj->driver_private; - r = radeon_object_wait(robj); + r = radeon_bo_wait(robj, NULL, false); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); @@ -317,7 +319,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, { struct drm_radeon_gem_set_tiling *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *robj; int r = 0; DRM_DEBUG("%d \n", args->handle); @@ -325,7 +327,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, if (gobj == NULL) return -EINVAL; robj = gobj->driver_private; - radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); + r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); @@ -337,16 +339,20 @@ int 
radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, { struct drm_radeon_gem_get_tiling *args = data; struct drm_gem_object *gobj; - struct radeon_object *robj; + struct radeon_bo *rbo; int r = 0; DRM_DEBUG("\n"); gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -EINVAL; - robj = gobj->driver_private; - radeon_object_get_tiling_flags(robj, &args->tiling_flags, - &args->pitch); + rbo = gobj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + goto out; + radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); + radeon_bo_unreserve(rbo); +out: mutex_lock(&dev->struct_mutex); drm_gem_object_unreference(gobj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index dd438d32e5c..da3da1e89d0 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) } -void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) +void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) { - struct radeon_device *rdev = radeon_connector->base.dev->dev_private; + struct radeon_device *rdev = i2c->dev->dev_private; + struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; - struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; /* RV410 appears to have a bug where the hw i2c in reset * holds the i2c port in a bad state - switch hw i2c away before * doing DDC - do this for all r200s/r300s/r400s for safety sake */ - if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { - if (rec->a_clk_reg == RADEON_GPIO_MONID) { - WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | - R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); - } else { - WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | - R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); + if (rec->hw_capable) { + if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { + if (rec->a_clk_reg == RADEON_GPIO_MONID) { + WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | + R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); + } else { + WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | + R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); + } } } - if (lock_state) { - temp = RREG32(rec->a_clk_reg); - temp &= ~(rec->a_clk_mask); - WREG32(rec->a_clk_reg, temp); - - temp = RREG32(rec->a_data_reg); - temp &= ~(rec->a_data_mask); - WREG32(rec->a_data_reg, temp); - } + /* clear the output pin values */ + temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; + WREG32(rec->a_clk_reg, temp); + + temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask; + WREG32(rec->a_data_reg, temp); + + /* set the pins to input */ + temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; + WREG32(rec->en_clk_reg, temp); + + temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask; + WREG32(rec->en_data_reg, temp); + + /* mask the gpio pins for software use */ temp = RREG32(rec->mask_clk_reg); if (lock_state) temp |= rec->mask_clk_mask; @@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv) struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t val; - val = RREG32(rec->get_clk_reg); - val &= rec->get_clk_mask; + /* read the value off the pin */ + val = RREG32(rec->y_clk_reg); + val &= rec->y_clk_mask; return (val != 0); } @@ -126,8 +135,10 @@ static int get_data(void *i2c_priv) struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t val; - val = RREG32(rec->get_data_reg); - val &= rec->get_data_mask; + /* read the value off the pin */ + val = 
RREG32(rec->y_data_reg); + val &= rec->y_data_mask; + return (val != 0); } @@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock) struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t val; - val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); - val |= clock ? 0 : rec->put_clk_mask; - WREG32(rec->put_clk_reg, val); + /* set pin direction */ + val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; + val |= clock ? 0 : rec->en_clk_mask; + WREG32(rec->en_clk_reg, val); } static void set_data(void *i2c_priv, int data) @@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data) struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t val; - val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); - val |= data ? 0 : rec->put_data_mask; - WREG32(rec->put_data_reg, val); + /* set pin direction */ + val = RREG32(rec->en_data_reg) & ~rec->en_data_mask; + val |= data ? 0 : rec->en_data_mask; + WREG32(rec->en_data_reg, val); } struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, - struct radeon_i2c_bus_rec *rec, - const char *name) + struct radeon_i2c_bus_rec *rec, + const char *name) { struct radeon_i2c_chan *i2c; int ret; @@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, return NULL; i2c->adapter.owner = THIS_MODULE; - i2c->adapter.algo_data = &i2c->algo; i2c->dev = dev; - i2c->algo.setsda = set_data; - i2c->algo.setscl = set_clock; - i2c->algo.getsda = get_data; - i2c->algo.getscl = get_clock; - i2c->algo.udelay = 20; + i2c_set_adapdata(&i2c->adapter, i2c); + i2c->adapter.algo_data = &i2c->algo.bit; + i2c->algo.bit.setsda = set_data; + i2c->algo.bit.setscl = set_clock; + i2c->algo.bit.getsda = get_data; + i2c->algo.bit.getscl = get_clock; + i2c->algo.bit.udelay = 20; /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always * make this, 2 jiffies is a lot more reliable */ - i2c->algo.timeout = 2; - i2c->algo.data = i2c; + i2c->algo.bit.timeout = 2; + i2c->algo.bit.data = i2c; i2c->rec = *rec; - i2c_set_adapdata(&i2c->adapter, i2c); - ret = i2c_bit_add_bus(&i2c->adapter); if (ret) { DRM_INFO("Failed to register i2c %s\n", name); @@ -194,6 +206,38 @@ out_free: } +struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, + struct radeon_i2c_bus_rec *rec, + const char *name) +{ + struct radeon_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); + if (i2c == NULL) + return NULL; + + i2c->rec = *rec; + i2c->adapter.owner = THIS_MODULE; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + i2c->adapter.algo_data = &i2c->algo.dp; + i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; + i2c->algo.dp.address = 0; + ret = i2c_dp_aux_add_bus(&i2c->adapter); + if (ret) { + DRM_INFO("Failed to register i2c %s\n", name); + goto out_free; + } + + return i2c; +out_free: + kfree(i2c); + return NULL; + +} + + void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) @@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) { return NULL; } + +void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 *val) +{ + u8 out_buf[2]; + u8 in_buf[2]; + struct i2c_msg msgs[] = { + { + .addr = slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) { + *val = in_buf[0]; + DRM_DEBUG("val = 0x%02x\n", *val); + } else { + DRM_ERROR("i2c 
0x%02x 0x%02x read failed\n", + addr, *val); + } +} + +void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 val) +{ + uint8_t out_buf[2]; + struct i2c_msg msg = { + .addr = slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = val; + + if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) + DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", + addr, val); +} + diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c index a1bf11de308..48b7cea31e0 100644 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c @@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd, &init->gart_textures_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); + return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); } typedef struct drm_radeon_clear32 { @@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, &clr->depth_boxes)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); + return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); } typedef struct drm_radeon_stipple32 { @@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, &request->mask)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); } typedef struct drm_radeon_tex_image32 { @@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, &image->data)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); } typedef struct drm_radeon_vertex2_32 { @@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, &request->prim)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); } typedef struct drm_radeon_cmd_buffer32 { @@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, &request->boxes)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); } typedef struct drm_radeon_getparam32 { @@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, &request->value)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); } typedef struct drm_radeon_mem_alloc32 { @@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, &request->region_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); } typedef struct drm_radeon_irq_emit32 { @@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, 
&request->irq_seq)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); + return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); } /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ @@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, &request->value)) return -EFAULT; - return drm_ioctl(file->f_dentry->d_inode, file, - DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); + return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); } #else #define compat_radeon_cp_setparam NULL @@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; - lock_kernel(); /* XXX for now */ if (fn != NULL) ret = (*fn) (filp, cmd, arg); else - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); - unlock_kernel(); + ret = drm_ioctl(filp, cmd, arg); return ret; } @@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - lock_kernel(); /* XXX for now */ - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); - unlock_kernel(); + ret = drm_ioctl(filp, cmd, arg); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index b79ecc4a7cc..2f349a30019 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr drm_radeon_irq_emit_t *emit = data; int result; - if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) - return -EINVAL; - - LOCK_TEST_WITH_RETURN(dev, file_priv); - if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return -EINVAL; + + LOCK_TEST_WITH_RETURN(dev, file_priv); + result = radeon_emit_irq(dev); if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a0fe6232dcb..3cfd60fd008 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) return radeon_irq_process(rdev); } +/* + * Handle hotplug events outside the interrupt handler proper. 
+ */ +static void radeon_hotplug_work_func(struct work_struct *work) +{ + struct radeon_device *rdev = container_of(work, struct radeon_device, + hotplug_work); + struct drm_device *dev = rdev->ddev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); + } + /* Just fire off a uevent and let userspace tell us what to do */ + drm_sysfs_hotplug_event(dev); +} + void radeon_driver_irq_preinstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; unsigned i; + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + /* Disable *all* interrupts */ rdev->irq.sw_int = false; for (i = 0; i < 2; i++) { @@ -76,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) rdev->irq.sw_int = false; for (i = 0; i < 2; i++) { rdev->irq.crtc_vblank_int[i] = false; + rdev->irq.hpd[i] = false; } radeon_irq_set(rdev); } @@ -87,30 +109,69 @@ int radeon_irq_kms_init(struct radeon_device *rdev) if (rdev->flags & RADEON_SINGLE_CRTC) num_crtc = 1; - + spin_lock_init(&rdev->irq.sw_lock); r = drm_vblank_init(rdev->ddev, num_crtc); if (r) { return r; } /* enable msi */ rdev->msi_enabled = 0; - if (rdev->family >= CHIP_RV380) { + /* MSIs don't seem to work on my rs780; + * not sure about rs880 or other rs780s. + * Needs more investigation. + */ + if ((rdev->family >= CHIP_RV380) && + (rdev->family != CHIP_RS780) && + (rdev->family != CHIP_RS880)) { int ret = pci_enable_msi(rdev->pdev); - if (!ret) + if (!ret) { rdev->msi_enabled = 1; + DRM_INFO("radeon: using MSI.\n"); + } } - drm_irq_install(rdev->ddev); rdev->irq.installed = true; + r = drm_irq_install(rdev->ddev); + if (r) { + rdev->irq.installed = false; + return r; + } DRM_INFO("radeon: irq initialized.\n"); return 0; } void radeon_irq_kms_fini(struct radeon_device *rdev) { + drm_vblank_cleanup(rdev->ddev); if (rdev->irq.installed) { - rdev->irq.installed = false; drm_irq_uninstall(rdev->ddev); + rdev->irq.installed = false; if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); } } + +void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) +{ + unsigned long irqflags; + + spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); + if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) { + rdev->irq.sw_int = true; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); +} + +void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) +{ + unsigned long irqflags; + + spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); + BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0); + if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) { + rdev->irq.sw_int = false; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); +} + diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index ba128621057..f23b05606eb 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -30,10 +30,19 @@ #include "radeon.h" #include "radeon_drm.h" +int radeon_driver_unload_kms(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + + if (rdev == NULL) + return 0; + radeon_modeset_fini(rdev); + radeon_device_fini(rdev); + kfree(rdev); + dev->dev_private = NULL; + return 0; +} -/* - * Driver load/unload - */ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct radeon_device *rdev; @@ 
-62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) */ r = radeon_device_init(rdev, dev, dev->pdev, flags); if (r) { - DRM_ERROR("Fatal error while trying to initialize radeon.\n"); - return r; + dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); + goto out; } /* Again modeset_init should fail only on fatal error * otherwise it should provide enough functionalities * for shadowfb to run */ r = radeon_modeset_init(rdev); - if (r) { - return r; - } - return 0; -} - -int radeon_driver_unload_kms(struct drm_device *dev) -{ - struct radeon_device *rdev = dev->dev_private; - - if (rdev == NULL) - return 0; - radeon_modeset_fini(rdev); - radeon_device_fini(rdev); - kfree(rdev); - dev->dev_private = NULL; - return 0; + if (r) + dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); +out: + if (r) + radeon_driver_unload_kms(dev); + return r; } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8d0b7aa87fa..b6d8081e124 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -30,9 +30,20 @@ #include "radeon.h" #include "atom.h" +static void radeon_overscan_setup(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + + WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0); + WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0); + WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0); +} + static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -292,8 +303,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) uint32_t mask; if (radeon_crtc->crtc_id) - mask = (RADEON_CRTC2_EN | - RADEON_CRTC2_DISP_DIS | + mask = (RADEON_CRTC2_DISP_DIS | RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_HSYNC_DIS | RADEON_CRTC2_DISP_REQ_EN_B); @@ -305,7 +315,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: if (radeon_crtc->crtc_id) - WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); + WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); else { WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | RADEON_CRTC_DISP_REQ_EN_B)); @@ -319,7 +329,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); if (radeon_crtc->crtc_id) - WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); + WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); else { WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | RADEON_CRTC_DISP_REQ_EN_B)); @@ -329,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) } } -/* properly set crtc bpp when using atombios */ -void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int format; - uint32_t crtc_gen_cntl; - uint32_t disp_merge_cntl; - uint32_t crtc_pitch; - - switch (crtc->fb->bits_per_pixel) { - case 8: - format = 2; - break; - case 15: /* 555 */ - format = 3; - break; - case 16: /* 565 */ - format = 4; - break; - case 
24: /* RGB */ - format = 5; - break; - case 32: /* xRGB */ - format = 6; - break; - default: - return; - } - - crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + - ((crtc->fb->bits_per_pixel * 8) - 1)) / - (crtc->fb->bits_per_pixel * 8)); - crtc_pitch |= crtc_pitch << 16; - - WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); - - switch (radeon_crtc->crtc_id) { - case 0: - disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); - disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; - WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); - - crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff; - crtc_gen_cntl |= (format << 8); - crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN; - WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); - break; - case 1: - disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); - disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; - WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); - - crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff; - crtc_gen_cntl |= (format << 8); - WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl); - WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); - WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); - break; - } -} - int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -400,14 +347,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_framebuffer *radeon_fb; struct drm_gem_object *obj; + struct radeon_bo *rbo; uint64_t base; uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_pitch, pitch_pixels; uint32_t tiling_flags; int format; uint32_t gen_cntl_reg, gen_cntl_val; + int r; DRM_DEBUG("\n"); + /* no fb bound */ + if (!crtc->fb) { + DRM_DEBUG("No FB bound\n"); + return 0; + } radeon_fb = to_radeon_framebuffer(crtc->fb); @@ -431,10 +385,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, return false; } + /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; - if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { + rbo = obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); return -EINVAL; } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); + radeon_bo_unreserve(rbo); + if (tiling_flags & RADEON_TILING_MICRO) + DRM_ERROR("trying to scanout microtiled buffer\n"); + /* if scanout was in GTT this really wouldn't work */ /* crtc offset is from display base addr not FB location */ radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; @@ -449,10 +415,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, (crtc->fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; - radeon_object_get_tiling_flags(obj->driver_private, - &tiling_flags, NULL); - if (tiling_flags & RADEON_TILING_MICRO) - DRM_ERROR("trying to scanout microtiled buffer\n"); if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) @@ -530,7 +492,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb && old_fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(old_fb); - radeon_gem_object_unpin(radeon_fb->obj); + rbo = radeon_fb->obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + radeon_bo_unpin(rbo); + radeon_bo_unreserve(rbo); } /* Bytes per pixel may have 
changed */ @@ -642,12 +609,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod uint32_t crtc2_gen_cntl; uint32_t disp2_merge_cntl; - /* check to see if TV DAC is enabled for another crtc and keep it enabled */ - if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) - crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON; - else - crtc2_gen_cntl = 0; - + /* if TV DAC is enabled for another crtc and keep it enabled */ + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080; crtc2_gen_cntl |= ((format << 8) | RADEON_CRTC2_VSYNC_DIS | RADEON_CRTC2_HSYNC_DIS @@ -676,7 +639,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod uint32_t crtc_ext_cntl; uint32_t disp_merge_cntl; - crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN + crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000; + crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN | (format << 8) | RADEON_CRTC_DISP_REQ_EN_B | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -728,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) uint32_t post_divider = 0; uint32_t freq = 0; uint8_t pll_gain; - int pll_flags = RADEON_PLL_LEGACY; bool use_bios_divs = false; /* PLL registers */ uint32_t pll_ref_div = 0; @@ -762,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) else pll = &rdev->clock.p1pll; + pll->flags = RADEON_PLL_LEGACY; + if (mode->clock > 200000) /* range limits??? */ - pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else - pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { @@ -777,20 +742,22 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) } if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) - pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; - if (lvds) { - if (lvds->use_bios_dividers) { - pll_ref_div = lvds->panel_ref_divider; - pll_fb_post_div = (lvds->panel_fb_divider | - (lvds->panel_post_divider << 16)); - htotal_cntl = 0; - use_bios_divs = true; + if (!rdev->is_atom_bios) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; + if (lvds) { + if (lvds->use_bios_dividers) { + pll_ref_div = lvds->panel_ref_divider; + pll_fb_post_div = (lvds->panel_fb_divider | + (lvds->panel_post_divider << 16)); + htotal_cntl = 0; + use_bios_divs = true; + } } } - pll_flags |= RADEON_PLL_USE_REF_DIV; + pll->flags |= RADEON_PLL_USE_REF_DIV; } } } @@ -800,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) if (!use_bios_divs) { radeon_compute_pll(pll, mode->clock, &freq, &feedback_div, &frac_fb_div, - &reference_div, &post_divider, - pll_flags); + &reference_div, &post_divider); for (post_div = &post_divs[0]; post_div->divider; ++post_div) { if (post_div->divider == post_divider) @@ -1027,8 +993,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, radeon_crtc_set_base(crtc, x, y, old_fb); radeon_set_crtc_timing(crtc, adjusted_mode); radeon_set_pll(crtc, adjusted_mode); + radeon_overscan_setup(crtc, adjusted_mode); 
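
The hunks above also change how PLL constraints are passed on the legacy modesetting path: radeon_compute_pll() loses its trailing flags argument, and callers such as radeon_set_pll() now stash the same RADEON_PLL_* bits in pll->flags before calling it. A condensed sketch of the old and new calling convention, drawn only from the hunks above (it assumes the surrounding radeon_set_pll() context and is not a standalone, compilable unit):

    /* old convention: constraint flags passed per call */
    int pll_flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV;
    radeon_compute_pll(pll, mode->clock, &freq,
                       &feedback_div, &frac_fb_div,
                       &reference_div, &post_divider, pll_flags);

    /* new convention: the flags live on the radeon_pll itself */
    pll->flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV;
    radeon_compute_pll(pll, mode->clock, &freq,
                       &feedback_div, &frac_fb_div,
                       &reference_div, &post_divider);

Moving the flags into struct radeon_pll lets every consumer of a given PLL (legacy and ATOM paths alike) see the same constraints without threading an extra parameter through each call chain.
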
if (radeon_crtc->crtc_id == 0) { - radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); + radeon_legacy_rmx_mode_set(crtc, adjusted_mode); } else { if (radeon_crtc->rmx_type != RMX_OFF) { /* FIXME: only first crtc has rmx what should we @@ -1042,12 +1009,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { - radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + struct drm_device *dev = crtc->dev; + struct drm_crtc *crtci; + + /* + * The hardware wedges sometimes if you reconfigure one CRTC + * whilst another is running (see fdo bug #24611). + */ + list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) + radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF); } static void radeon_crtc_commit(struct drm_crtc *crtc) { - radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + struct drm_device *dev = crtc->dev; + struct drm_crtc *crtci; + + /* + * Reenable the CRTCs that should be running. + */ + list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) { + if (crtci->enabled) + radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); + } } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 00382122869..38e45e231ef 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; int panel_pwr_delay = 2000; + bool is_mac = false; DRM_DEBUG("\n"); if (radeon_encoder->enc_priv) { @@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) } } + /* macs (and possibly some x86 oem systems?) wire up LVDS strangely + * Taken from radeonfb. 
+ */ + if ((rdev->mode_info.connector_table == CT_IBOOK) || + (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) || + (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) || + (rdev->mode_info.connector_table == CT_POWERBOOK_VGA)) + is_mac = true; + switch (mode) { case DRM_MODE_DPMS_ON: disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN); @@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); + if (is_mac) + lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS); udelay(panel_pwr_delay * 1000); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); @@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; - lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); + if (is_mac) { + lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; + WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN); + } else { + WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); + lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); + } udelay(panel_pwr_delay * 1000); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); @@ -136,7 +155,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); - if ((!rdev->is_atom_bios)) { + if (rdev->is_atom_bios) { + /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl + * need to call that on resume to set up the reg properly. 
+ */ + radeon_encoder->pixel_clock = adjusted_mode->clock; + atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); + lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); + } else { struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; if (lvds) { DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); @@ -147,8 +173,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); } else lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); - } else - lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); + } lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | @@ -184,9 +209,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } -static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -194,15 +219,24 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); - if (radeon_encoder->rmx_type != RMX_OFF) - radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); + /* get the native mode for LVDS */ + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { + struct drm_display_mode *native_mode = &radeon_encoder->native_mode; + int mode_id = adjusted_mode->base.id; + *adjusted_mode = *native_mode; + adjusted_mode->hdisplay = mode->hdisplay; + adjusted_mode->vdisplay = mode->vdisplay; + adjusted_mode->crtc_hdisplay = mode->hdisplay; + adjusted_mode->crtc_vdisplay = mode->vdisplay; + adjusted_mode->base.id = mode_id; + } return true; } static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .dpms = radeon_legacy_lvds_dpms, - .mode_fixup = radeon_legacy_lvds_mode_fixup, + .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_lvds_prepare, .mode_set = radeon_legacy_lvds_mode_set, .commit = radeon_legacy_lvds_commit, @@ -214,17 +248,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { .destroy = radeon_enc_destroy, }; -static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); - drm_mode_set_crtcinfo(adjusted_mode, 0); - - return true; -} - static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -410,7 +433,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { .dpms = radeon_legacy_primary_dac_dpms, - .mode_fixup = radeon_legacy_primary_dac_mode_fixup, + .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_primary_dac_prepare, .mode_set = radeon_legacy_primary_dac_mode_set, .commit = radeon_legacy_primary_dac_commit, @@ -423,16 +446,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; -static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder 
*encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - - drm_mode_set_crtcinfo(adjusted_mode, 0); - - return true; -} - static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -584,7 +597,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { .dpms = radeon_legacy_tmds_int_dpms, - .mode_fixup = radeon_legacy_tmds_int_mode_fixup, + .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_int_prepare, .mode_set = radeon_legacy_tmds_int_mode_set, .commit = radeon_legacy_tmds_int_commit, @@ -596,17 +609,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { .destroy = radeon_enc_destroy, }; -static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); - drm_mode_set_crtcinfo(adjusted_mode, 0); - - return true; -} - static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -697,6 +699,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, /*if (mode->clock > 165000) fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ } + if (!radeon_combios_external_tmds_setup(encoder)) + radeon_external_tmds_setup(encoder); } if (radeon_crtc->crtc_id == 0) { @@ -724,9 +728,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); } +static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; + if (tmds) { + if (tmds->i2c_bus) + radeon_i2c_destroy(tmds->i2c_bus); + } + kfree(radeon_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(radeon_encoder); +} + static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { .dpms = radeon_legacy_tmds_ext_dpms, - .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, + .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tmds_ext_prepare, .mode_set = radeon_legacy_tmds_ext_mode_set, .commit = radeon_legacy_tmds_ext_commit, @@ -735,20 +752,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { - .destroy = radeon_enc_destroy, + .destroy = radeon_ext_tmds_enc_destroy, }; -static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* set the active encoder to connector routing */ - radeon_encoder_set_active_device(encoder); - drm_mode_set_crtcinfo(adjusted_mode, 0); - - return true; -} - static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -1265,7 +1271,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { .dpms = radeon_legacy_tv_dac_dpms, - .mode_fixup = radeon_legacy_tv_dac_mode_fixup, + .mode_fixup = radeon_legacy_mode_fixup, .prepare = radeon_legacy_tv_dac_prepare, .mode_set = radeon_legacy_tv_dac_mode_set, 
.commit = radeon_legacy_tv_dac_commit, @@ -1302,6 +1308,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon return tmds; } +static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder_ext_tmds *tmds = NULL; + bool ret; + + if (rdev->is_atom_bios) + return NULL; + + tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL); + + if (!tmds) + return NULL; + + ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); + + if (ret == false) + radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); + + return tmds; +} + void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) { @@ -1329,7 +1358,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t encoder->possible_crtcs = 0x1; else encoder->possible_crtcs = 0x3; - encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; @@ -1373,7 +1401,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); if (!rdev->is_atom_bios) - radeon_combios_get_ext_tmds_info(radeon_encoder); + radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); break; } } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 3a12bb0c056..417684daef4 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c @@ -77,7 +77,7 @@ struct radeon_tv_mode_constants { unsigned pix_to_tv; }; -static const uint16_t hor_timing_NTSC[] = { +static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = { 0x0007, 0x003f, 0x0263, @@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = { 0 }; -static const uint16_t vert_timing_NTSC[] = { +static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = { 0x2001, 0x200d, 0x1006, @@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = { 0 }; -static const uint16_t hor_timing_PAL[] = { +static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = { 0x0007, 0x0058, 0x027c, @@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = { 0 }; -static const uint16_t vert_timing_PAL[] = { +static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = { 0x2001, 0x200c, 0x1005, @@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, } flicker_removal = (tmp + 500) / 1000; - if (flicker_removal < 3) - flicker_removal = 3; - for (i = 0; i < 6; ++i) { + if (flicker_removal < 2) + flicker_removal = 2; + for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { if (flicker_removal == SLOPE_limit[i]) break; } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ace726aa0d7..e81b2aeb6a8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -33,6 +33,7 @@ #include <drm_crtc.h> #include <drm_mode.h> #include <drm_edid.h> +#include <drm_dp_helper.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> @@ -45,32 +46,6 @@ struct radeon_device; #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) -enum radeon_connector_type { - 
CONNECTOR_NONE, - CONNECTOR_VGA, - CONNECTOR_DVI_I, - CONNECTOR_DVI_D, - CONNECTOR_DVI_A, - CONNECTOR_STV, - CONNECTOR_CTV, - CONNECTOR_LVDS, - CONNECTOR_DIGITAL, - CONNECTOR_SCART, - CONNECTOR_HDMI_TYPE_A, - CONNECTOR_HDMI_TYPE_B, - CONNECTOR_0XC, - CONNECTOR_0XD, - CONNECTOR_DIN, - CONNECTOR_DISPLAY_PORT, - CONNECTOR_UNSUPPORTED -}; - -enum radeon_dvi_type { - DVI_AUTO, - DVI_DIGITAL, - DVI_ANALOG -}; - enum radeon_rmx_type { RMX_OFF, RMX_FULL, @@ -87,26 +62,48 @@ enum radeon_tv_std { TV_STD_SCART_PAL, TV_STD_SECAM, TV_STD_PAL_CN, + TV_STD_PAL_N, }; +/* radeon gpio-based i2c + * 1. "mask" reg and bits + * grabs the gpio pins for software use + * 0=not held 1=held + * 2. "a" reg and bits + * output pin value + * 0=low 1=high + * 3. "en" reg and bits + * sets the pin direction + * 0=input 1=output + * 4. "y" reg and bits + * input pin value + * 0=low 1=high + */ struct radeon_i2c_bus_rec { bool valid; + /* id used by atom */ + uint8_t i2c_id; + /* can be used with hw i2c engine */ + bool hw_capable; + /* uses multi-media i2c engine */ + bool mm_i2c; + /* regs and bits */ uint32_t mask_clk_reg; uint32_t mask_data_reg; uint32_t a_clk_reg; uint32_t a_data_reg; - uint32_t put_clk_reg; - uint32_t put_data_reg; - uint32_t get_clk_reg; - uint32_t get_data_reg; + uint32_t en_clk_reg; + uint32_t en_data_reg; + uint32_t y_clk_reg; + uint32_t y_data_reg; uint32_t mask_clk_mask; uint32_t mask_data_mask; - uint32_t put_clk_mask; - uint32_t put_data_mask; - uint32_t get_clk_mask; - uint32_t get_data_mask; uint32_t a_clk_mask; uint32_t a_data_mask; + uint32_t en_clk_mask; + uint32_t en_data_mask; + uint32_t y_clk_mask; + uint32_t y_data_mask; }; struct radeon_tmds_pll { @@ -128,16 +125,24 @@ struct radeon_tmds_pll { #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) +#define RADEON_PLL_USE_POST_DIV (1 << 12) struct radeon_pll { - uint16_t reference_freq; - uint16_t reference_div; + /* reference frequency */ + uint32_t reference_freq; + + /* fixed dividers */ + uint32_t reference_div; + uint32_t post_div; + + /* pll in/out limits */ uint32_t pll_in_min; uint32_t pll_in_max; uint32_t pll_out_min; uint32_t pll_out_max; - uint16_t xclk; + uint32_t best_vco; + /* divider limits */ uint32_t min_ref_div; uint32_t max_ref_div; uint32_t min_post_div; @@ -146,13 +151,21 @@ struct radeon_pll { uint32_t max_feedback_div; uint32_t min_frac_feedback_div; uint32_t max_frac_feedback_div; - uint32_t best_vco; + + /* flags for the current clock */ + uint32_t flags; + + /* pll id */ + uint32_t id; }; struct radeon_i2c_chan { - struct drm_device *dev; struct i2c_adapter adapter; - struct i2c_algo_bit_data algo; + struct drm_device *dev; + union { + struct i2c_algo_dp_aux_data dp; + struct i2c_algo_bit_data bit; + } algo; struct radeon_i2c_bus_rec rec; }; @@ -170,6 +183,11 @@ enum radeon_connector_table { CT_EMAC, }; +enum radeon_dvo_chip { + DVO_SIL164, + DVO_SIL1178, +}; + struct radeon_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; @@ -261,6 +279,13 @@ struct radeon_encoder_int_tmds { struct radeon_tmds_pll tmds_pll[4]; }; +struct radeon_encoder_ext_tmds { + /* tmds over dvo */ + struct radeon_i2c_chan *i2c_bus; + uint8_t slave_addr; + enum radeon_dvo_chip dvo_chip; +}; + /* spread spectrum */ struct radeon_atom_ss { uint16_t percentage; @@ -274,7 +299,7 @@ struct radeon_atom_ss { struct radeon_encoder_atom_dig { /* atom dig */ bool coherent_mode; - int dig_block; + int dig_encoder; /* -1 
disabled, 0 DIGA, 1 DIGB */ /* atom lvds */ uint32_t lvds_misc; uint16_t panel_pwr_delay; @@ -297,11 +322,43 @@ struct radeon_encoder { enum radeon_rmx_type rmx_type; struct drm_display_mode native_mode; void *enc_priv; + int hdmi_offset; + int hdmi_audio_workaround; + int hdmi_buffer_status; }; struct radeon_connector_atom_dig { uint32_t igp_lane_info; bool linkb; + /* displayport */ + struct radeon_i2c_chan *dp_i2c_bus; + u8 dpcd[8]; + u8 dp_sink_type; + int dp_clock; + int dp_lane_count; +}; + +struct radeon_gpio_rec { + bool valid; + u8 id; + u32 reg; + u32 mask; +}; + +enum radeon_hpd_id { + RADEON_HPD_NONE = 0, + RADEON_HPD_1, + RADEON_HPD_2, + RADEON_HPD_3, + RADEON_HPD_4, + RADEON_HPD_5, + RADEON_HPD_6, +}; + +struct radeon_hpd { + enum radeon_hpd_id hpd; + u8 plugged_state; + struct radeon_gpio_rec gpio; }; struct radeon_connector { @@ -318,6 +375,7 @@ struct radeon_connector { void *con_priv; bool dac_load_detect; uint16_t connector_object_id; + struct radeon_hpd hpd; }; struct radeon_framebuffer { @@ -325,10 +383,42 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; +extern enum radeon_tv_std +radeon_combios_get_tv_info(struct radeon_device *rdev); +extern enum radeon_tv_std +radeon_atombios_get_tv_info(struct radeon_device *rdev); + +extern void radeon_connector_hotplug(struct drm_connector *connector); +extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); +extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, + struct drm_display_mode *mode); +extern void radeon_dp_set_link_config(struct drm_connector *connector, + struct drm_display_mode *mode); +extern void dp_link_train(struct drm_encoder *encoder, + struct drm_connector *connector); +extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); +extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); +extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, + int action, uint8_t lane_num, + uint8_t lane_set); +extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte); + +extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, + struct radeon_i2c_bus_rec *rec, + const char *name); extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name); extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); +extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 *val); +extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, + u8 slave_addr, + u8 addr, + u8 val); extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); @@ -340,8 +430,17 @@ extern void radeon_compute_pll(struct radeon_pll *pll, uint32_t *fb_div_p, uint32_t *frac_fb_div_p, uint32_t *ref_div_p, - uint32_t *post_div_p, - int flags); + uint32_t *post_div_p); + +extern void radeon_compute_pll_avivo(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p); + +extern void radeon_setup_encoder_clones(struct drm_device *dev); struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); @@ -349,6 +448,7 @@ struct drm_encoder 
*radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); +extern void atombios_digital_setup(struct drm_encoder *encoder, int action); extern int atombios_get_encoder_mode(struct drm_encoder *encoder); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); @@ -364,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); -extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc); extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, @@ -378,12 +477,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern struct radeon_encoder_atom_dig * radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); -bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, - struct radeon_encoder_int_tmds *tmds); -bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, - struct radeon_encoder_int_tmds *tmds); -bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, - struct radeon_encoder_int_tmds *tmds); +extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); +extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); +extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); +extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, + struct radeon_encoder_ext_tmds *tmds); +extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, + struct radeon_encoder_ext_tmds *tmds); extern struct radeon_encoder_primary_dac * radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_tv_dac * @@ -395,6 +498,8 @@ extern struct radeon_encoder_tv_dac * radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_primary_dac * radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); +extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder); +extern void radeon_external_tmds_setup(struct drm_encoder *encoder); extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); @@ -426,16 +531,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc); void radeon_legacy_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc); -void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); +extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state); void radeon_get_clock_info(struct drm_device *dev); extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); -void radeon_rmx_mode_fixup(struct drm_encoder *encoder, - 
struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_combios_asic_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f056dadc5c..d72a71bff21 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -34,100 +34,62 @@ #include "radeon_drm.h" #include "radeon.h" -struct radeon_object { - struct ttm_buffer_object tobj; - struct list_head list; - struct radeon_device *rdev; - struct drm_gem_object *gobj; - struct ttm_bo_kmap_obj kmap; - unsigned pin_count; - uint64_t gpu_addr; - void *kptr; - bool is_iomem; - uint32_t tiling_flags; - uint32_t pitch; - int surface_reg; -}; int radeon_ttm_init(struct radeon_device *rdev); void radeon_ttm_fini(struct radeon_device *rdev); +static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); /* * To exclude mutual BO access we rely on bo_reserve exclusion, as all * function are calling it. */ -static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) +static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) { - return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); -} + struct radeon_bo *bo; -static void radeon_object_unreserve(struct radeon_object *robj) -{ - ttm_bo_unreserve(&robj->tobj); + bo = container_of(tbo, struct radeon_bo, tbo); + mutex_lock(&bo->rdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&bo->rdev->gem.mutex); + radeon_bo_clear_surface_reg(bo); + kfree(bo); } -static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) +bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) { - struct radeon_object *robj; - - robj = container_of(tobj, struct radeon_object, tobj); - list_del_init(&robj->list); - radeon_object_clear_surface_reg(robj); - kfree(robj); + if (bo->destroy == &radeon_ttm_bo_destroy) + return true; + return false; } -static inline void radeon_object_gpu_addr(struct radeon_object *robj) +void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { - /* Default gpu address */ - robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; - if (robj->tobj.mem.mm_node == NULL) { - return; - } - robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT; - switch (robj->tobj.mem.mem_type) { - case TTM_PL_VRAM: - robj->gpu_addr += (u64)robj->rdev->mc.vram_location; - break; - case TTM_PL_TT: - robj->gpu_addr += (u64)robj->rdev->mc.gtt_location; - break; - default: - DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type); - robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; - return; - } -} + u32 c = 0; -static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) -{ - uint32_t flags = 0; - if (domain & RADEON_GEM_DOMAIN_VRAM) { - flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; - } - if (domain & RADEON_GEM_DOMAIN_GTT) { - flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - } - if (domain & RADEON_GEM_DOMAIN_CPU) { - flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; - } - if (!flags) { - flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; - } - return flags; + rbo->placement.fpfn = 0; + rbo->placement.lpfn = 0; + rbo->placement.placement = rbo->placements; + rbo->placement.busy_placement = rbo->placements; + if (domain & RADEON_GEM_DOMAIN_VRAM) + rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + if (domain & 
RADEON_GEM_DOMAIN_GTT) + rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + if (domain & RADEON_GEM_DOMAIN_CPU) + rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!c) + rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + rbo->placement.num_placement = c; + rbo->placement.num_busy_placement = c; } -int radeon_object_create(struct radeon_device *rdev, - struct drm_gem_object *gobj, - unsigned long size, - bool kernel, - uint32_t domain, - bool interruptible, - struct radeon_object **robj_ptr) +int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, + unsigned long size, bool kernel, u32 domain, + struct radeon_bo **bo_ptr) { - struct radeon_object *robj; + struct radeon_bo *bo; enum ttm_bo_type type; - uint32_t flags; int r; if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { @@ -138,238 +100,161 @@ int radeon_object_create(struct radeon_device *rdev, } else { type = ttm_bo_type_device; } - *robj_ptr = NULL; - robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); - if (robj == NULL) { + *bo_ptr = NULL; + bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); + if (bo == NULL) return -ENOMEM; - } - robj->rdev = rdev; - robj->gobj = gobj; - robj->surface_reg = -1; - INIT_LIST_HEAD(&robj->list); - - flags = radeon_object_flags_from_domain(domain); - r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, - 0, 0, false, NULL, size, - &radeon_ttm_object_object_destroy); + bo->rdev = rdev; + bo->gobj = gobj; + bo->surface_reg = -1; + INIT_LIST_HEAD(&bo->list); + + radeon_ttm_placement_from_domain(bo, domain); + /* Kernel allocation are uninterruptible */ + r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, + &bo->placement, 0, 0, !kernel, NULL, size, + &radeon_ttm_bo_destroy); if (unlikely(r != 0)) { - /* ttm call radeon_ttm_object_object_destroy if error happen */ - DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", - size, flags, 0); + if (r != -ERESTARTSYS) + dev_err(rdev->dev, + "object_init failed for (%lu, 0x%08X)\n", + size, domain); return r; } - *robj_ptr = robj; + *bo_ptr = bo; if (gobj) { - list_add_tail(&robj->list, &rdev->gem.objects); + mutex_lock(&bo->rdev->gem.mutex); + list_add_tail(&bo->list, &rdev->gem.objects); + mutex_unlock(&bo->rdev->gem.mutex); } return 0; } -int radeon_object_kmap(struct radeon_object *robj, void **ptr) +int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) { + bool is_iomem; int r; - spin_lock(&robj->tobj.lock); - if (robj->kptr) { + if (bo->kptr) { if (ptr) { - *ptr = robj->kptr; + *ptr = bo->kptr; } - spin_unlock(&robj->tobj.lock); return 0; } - spin_unlock(&robj->tobj.lock); - r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); if (r) { return r; } - spin_lock(&robj->tobj.lock); - robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem); - spin_unlock(&robj->tobj.lock); + bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); if (ptr) { - *ptr = robj->kptr; + *ptr = bo->kptr; } - radeon_object_check_tiling(robj, 0, 0); + radeon_bo_check_tiling(bo, 0, 0); return 0; } -void radeon_object_kunmap(struct radeon_object *robj) +void radeon_bo_kunmap(struct radeon_bo *bo) { - spin_lock(&robj->tobj.lock); - if (robj->kptr == NULL) { - spin_unlock(&robj->tobj.lock); + if (bo->kptr == NULL) return; - } - robj->kptr = NULL; - spin_unlock(&robj->tobj.lock); - radeon_object_check_tiling(robj, 0, 0); - ttm_bo_kunmap(&robj->kmap); + bo->kptr = NULL; + 
radeon_bo_check_tiling(bo, 0, 0); + ttm_bo_kunmap(&bo->kmap); } -void radeon_object_unref(struct radeon_object **robj) +void radeon_bo_unref(struct radeon_bo **bo) { - struct ttm_buffer_object *tobj; + struct ttm_buffer_object *tbo; - if ((*robj) == NULL) { + if ((*bo) == NULL) return; - } - tobj = &((*robj)->tobj); - ttm_bo_unref(&tobj); - if (tobj == NULL) { - *robj = NULL; - } -} - -int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset) -{ - *offset = robj->tobj.addr_space_offset; - return 0; + tbo = &((*bo)->tbo); + ttm_bo_unref(&tbo); + if (tbo == NULL) + *bo = NULL; } -int radeon_object_pin(struct radeon_object *robj, uint32_t domain, - uint64_t *gpu_addr) +int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) { - uint32_t flags; - uint32_t tmp; - int r; + int r, i; - flags = radeon_object_flags_from_domain(domain); - spin_lock(&robj->tobj.lock); - if (robj->pin_count) { - robj->pin_count++; - if (gpu_addr != NULL) { - *gpu_addr = robj->gpu_addr; - } - spin_unlock(&robj->tobj.lock); + radeon_ttm_placement_from_domain(bo, domain); + if (bo->pin_count) { + bo->pin_count++; + if (gpu_addr) + *gpu_addr = radeon_bo_gpu_offset(bo); return 0; } - spin_unlock(&robj->tobj.lock); - r = radeon_object_reserve(robj, false); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); - return r; - } - tmp = robj->tobj.mem.placement; - ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); - robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; - r = ttm_buffer_object_validate(&robj->tobj, - robj->tobj.proposed_placement, - false, false); - radeon_object_gpu_addr(robj); - if (gpu_addr != NULL) { - *gpu_addr = robj->gpu_addr; - } - robj->pin_count = 1; - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to pin object.\n"); - } - radeon_object_unreserve(robj); - return r; -} - -void radeon_object_unpin(struct radeon_object *robj) -{ - uint32_t flags; - int r; - - spin_lock(&robj->tobj.lock); - if (!robj->pin_count) { - spin_unlock(&robj->tobj.lock); - printk(KERN_WARNING "Unpin not necessary for %p !\n", robj); - return; - } - robj->pin_count--; - if (robj->pin_count) { - spin_unlock(&robj->tobj.lock); - return; - } - spin_unlock(&robj->tobj.lock); - r = radeon_object_reserve(robj, false); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); - return; - } - flags = robj->tobj.mem.placement; - robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; - r = ttm_buffer_object_validate(&robj->tobj, - robj->tobj.proposed_placement, - false, false); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to unpin buffer.\n"); - } - radeon_object_unreserve(robj); -} - -int radeon_object_wait(struct radeon_object *robj) -{ - int r = 0; - - /* FIXME: should use block reservation instead */ - r = radeon_object_reserve(robj, true); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to reserve object for waiting.\n"); - return r; - } - spin_lock(&robj->tobj.lock); - if (robj->tobj.sync_obj) { - r = ttm_bo_wait(&robj->tobj, true, true, false); - } - spin_unlock(&robj->tobj.lock); - radeon_object_unreserve(robj); + radeon_ttm_placement_from_domain(bo, domain); + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (likely(r == 0)) { + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = radeon_bo_gpu_offset(bo); + } + if (unlikely(r != 0)) + dev_err(bo->rdev->dev, "%p pin 
failed\n", bo); return r; } -int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) +int radeon_bo_unpin(struct radeon_bo *bo) { - int r = 0; + int r, i; - r = radeon_object_reserve(robj, true); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to reserve object for waiting.\n"); - return r; - } - spin_lock(&robj->tobj.lock); - *cur_placement = robj->tobj.mem.mem_type; - if (robj->tobj.sync_obj) { - r = ttm_bo_wait(&robj->tobj, true, true, true); + if (!bo->pin_count) { + dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); + return 0; } - spin_unlock(&robj->tobj.lock); - radeon_object_unreserve(robj); + bo->pin_count--; + if (bo->pin_count) + return 0; + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (unlikely(r != 0)) + dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); return r; } -int radeon_object_evict_vram(struct radeon_device *rdev) +int radeon_bo_evict_vram(struct radeon_device *rdev) { - if (rdev->flags & RADEON_IS_IGP) { - /* Useless to evict on IGP chips */ - return 0; + /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ + if (0 && (rdev->flags & RADEON_IS_IGP)) { + if (rdev->mc.igp_sideport_enabled == false) + /* Useless to evict on IGP chips */ + return 0; } return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); } -void radeon_object_force_delete(struct radeon_device *rdev) +void radeon_bo_force_delete(struct radeon_device *rdev) { - struct radeon_object *robj, *n; + struct radeon_bo *bo, *n; struct drm_gem_object *gobj; if (list_empty(&rdev->gem.objects)) { return; } - DRM_ERROR("Userspace still has active objects !\n"); - list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { + dev_err(rdev->dev, "Userspace still has active objects !\n"); + list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { mutex_lock(&rdev->ddev->struct_mutex); - gobj = robj->gobj; - DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", - gobj, robj, (unsigned long)gobj->size, - *((unsigned long *)&gobj->refcount)); - list_del_init(&robj->list); - radeon_object_unref(&robj); + gobj = bo->gobj; + dev_err(rdev->dev, "%p %p %lu %lu force free\n", + gobj, bo, (unsigned long)gobj->size, + *((unsigned long *)&gobj->refcount)); + mutex_lock(&bo->rdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&bo->rdev->gem.mutex); + radeon_bo_unref(&bo); gobj->driver_private = NULL; drm_gem_object_unreference(gobj); mutex_unlock(&rdev->ddev->struct_mutex); } } -int radeon_object_init(struct radeon_device *rdev) +int radeon_bo_init(struct radeon_device *rdev) { /* Add an MTRR for the VRAM */ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, @@ -382,13 +267,13 @@ int radeon_object_init(struct radeon_device *rdev) return radeon_ttm_init(rdev); } -void radeon_object_fini(struct radeon_device *rdev) +void radeon_bo_fini(struct radeon_device *rdev) { radeon_ttm_fini(rdev); } -void radeon_object_list_add_object(struct radeon_object_list *lobj, - struct list_head *head) +void radeon_bo_list_add_object(struct radeon_bo_list *lobj, + struct list_head *head) { if (lobj->wdomain) { list_add(&lobj->list, head); @@ -397,72 +282,62 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, } } -int radeon_object_list_reserve(struct list_head *head) +int radeon_bo_list_reserve(struct list_head *head) { - struct radeon_object_list *lobj; + struct radeon_bo_list *lobj; int r; list_for_each_entry(lobj, head, 
list){ - if (!lobj->robj->pin_count) { - r = radeon_object_reserve(lobj->robj, true); - if (unlikely(r != 0)) { - DRM_ERROR("radeon: failed to reserve object.\n"); - return r; - } - } else { - } + r = radeon_bo_reserve(lobj->bo, false); + if (unlikely(r != 0)) + return r; } return 0; } -void radeon_object_list_unreserve(struct list_head *head) +void radeon_bo_list_unreserve(struct list_head *head) { - struct radeon_object_list *lobj; + struct radeon_bo_list *lobj; list_for_each_entry(lobj, head, list) { - if (!lobj->robj->pin_count) { - radeon_object_unreserve(lobj->robj); - } + /* only unreserve object we successfully reserved */ + if (radeon_bo_is_reserved(lobj->bo)) + radeon_bo_unreserve(lobj->bo); } } -int radeon_object_list_validate(struct list_head *head, void *fence) +int radeon_bo_list_validate(struct list_head *head, void *fence) { - struct radeon_object_list *lobj; - struct radeon_object *robj; + struct radeon_bo_list *lobj; + struct radeon_bo *bo; struct radeon_fence *old_fence = NULL; int r; - r = radeon_object_list_reserve(head); + r = radeon_bo_list_reserve(head); if (unlikely(r != 0)) { - radeon_object_list_unreserve(head); return r; } list_for_each_entry(lobj, head, list) { - robj = lobj->robj; - if (!robj->pin_count) { + bo = lobj->bo; + if (!bo->pin_count) { if (lobj->wdomain) { - robj->tobj.proposed_placement = - radeon_object_flags_from_domain(lobj->wdomain); + radeon_ttm_placement_from_domain(bo, + lobj->wdomain); } else { - robj->tobj.proposed_placement = - radeon_object_flags_from_domain(lobj->rdomain); + radeon_ttm_placement_from_domain(bo, + lobj->rdomain); } - r = ttm_buffer_object_validate(&robj->tobj, - robj->tobj.proposed_placement, - true, false); - if (unlikely(r)) { - DRM_ERROR("radeon: failed to validate.\n"); + r = ttm_bo_validate(&bo->tbo, &bo->placement, + true, false); + if (unlikely(r)) return r; - } - radeon_object_gpu_addr(robj); } - lobj->gpu_offset = robj->gpu_addr; - lobj->tiling_flags = robj->tiling_flags; + lobj->gpu_offset = radeon_bo_gpu_offset(bo); + lobj->tiling_flags = bo->tiling_flags; if (fence) { - old_fence = (struct radeon_fence *)robj->tobj.sync_obj; - robj->tobj.sync_obj = radeon_fence_ref(fence); - robj->tobj.sync_obj_arg = NULL; + old_fence = (struct radeon_fence *)bo->tbo.sync_obj; + bo->tbo.sync_obj = radeon_fence_ref(fence); + bo->tbo.sync_obj_arg = NULL; } if (old_fence) { radeon_fence_unref(&old_fence); @@ -471,51 +346,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence) return 0; } -void radeon_object_list_unvalidate(struct list_head *head) +void radeon_bo_list_unvalidate(struct list_head *head, void *fence) { - struct radeon_object_list *lobj; - struct radeon_fence *old_fence = NULL; + struct radeon_bo_list *lobj; + struct radeon_fence *old_fence; - list_for_each_entry(lobj, head, list) { - old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; - lobj->robj->tobj.sync_obj = NULL; - if (old_fence) { - radeon_fence_unref(&old_fence); + if (fence) + list_for_each_entry(lobj, head, list) { + old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); + if (old_fence == fence) { + lobj->bo->tbo.sync_obj = NULL; + radeon_fence_unref(&old_fence); + } } - } - radeon_object_list_unreserve(head); + radeon_bo_list_unreserve(head); } -void radeon_object_list_clean(struct list_head *head) -{ - radeon_object_list_unreserve(head); -} - -int radeon_object_fbdev_mmap(struct radeon_object *robj, +int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma) { - return ttm_fbdev_mmap(vma, &robj->tobj); + 
return ttm_fbdev_mmap(vma, &bo->tbo); } -unsigned long radeon_object_size(struct radeon_object *robj) +int radeon_bo_get_surface_reg(struct radeon_bo *bo) { - return robj->tobj.num_pages << PAGE_SHIFT; -} - -int radeon_object_get_surface_reg(struct radeon_object *robj) -{ - struct radeon_device *rdev = robj->rdev; + struct radeon_device *rdev = bo->rdev; struct radeon_surface_reg *reg; - struct radeon_object *old_object; + struct radeon_bo *old_object; int steal; int i; - if (!robj->tiling_flags) + BUG_ON(!atomic_read(&bo->tbo.reserved)); + + if (!bo->tiling_flags) return 0; - if (robj->surface_reg >= 0) { - reg = &rdev->surface_regs[robj->surface_reg]; - i = robj->surface_reg; + if (bo->surface_reg >= 0) { + reg = &rdev->surface_regs[bo->surface_reg]; + i = bo->surface_reg; goto out; } @@ -523,10 +391,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { reg = &rdev->surface_regs[i]; - if (!reg->robj) + if (!reg->bo) break; - old_object = reg->robj; + old_object = reg->bo; if (old_object->pin_count == 0) steal = i; } @@ -537,91 +405,107 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) return -ENOMEM; /* find someone with a surface reg and nuke their BO */ reg = &rdev->surface_regs[steal]; - old_object = reg->robj; + old_object = reg->bo; /* blow away the mapping */ DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); - ttm_bo_unmap_virtual(&old_object->tobj); + ttm_bo_unmap_virtual(&old_object->tbo); old_object->surface_reg = -1; i = steal; } - robj->surface_reg = i; - reg->robj = robj; + bo->surface_reg = i; + reg->bo = bo; out: - radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, - robj->tobj.mem.mm_node->start << PAGE_SHIFT, - robj->tobj.num_pages << PAGE_SHIFT); + radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, + bo->tbo.mem.mm_node->start << PAGE_SHIFT, + bo->tbo.num_pages << PAGE_SHIFT); return 0; } -void radeon_object_clear_surface_reg(struct radeon_object *robj) +static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) { - struct radeon_device *rdev = robj->rdev; + struct radeon_device *rdev = bo->rdev; struct radeon_surface_reg *reg; - if (robj->surface_reg == -1) + if (bo->surface_reg == -1) return; - reg = &rdev->surface_regs[robj->surface_reg]; - radeon_clear_surface_reg(rdev, robj->surface_reg); + reg = &rdev->surface_regs[bo->surface_reg]; + radeon_clear_surface_reg(rdev, bo->surface_reg); - reg->robj = NULL; - robj->surface_reg = -1; + reg->bo = NULL; + bo->surface_reg = -1; } -void radeon_object_set_tiling_flags(struct radeon_object *robj, - uint32_t tiling_flags, uint32_t pitch) +int radeon_bo_set_tiling_flags(struct radeon_bo *bo, + uint32_t tiling_flags, uint32_t pitch) { - robj->tiling_flags = tiling_flags; - robj->pitch = pitch; + int r; + + r = radeon_bo_reserve(bo, false); + if (unlikely(r != 0)) + return r; + bo->tiling_flags = tiling_flags; + bo->pitch = pitch; + radeon_bo_unreserve(bo); + return 0; } -void radeon_object_get_tiling_flags(struct radeon_object *robj, - uint32_t *tiling_flags, - uint32_t *pitch) +void radeon_bo_get_tiling_flags(struct radeon_bo *bo, + uint32_t *tiling_flags, + uint32_t *pitch) { + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (tiling_flags) - *tiling_flags = robj->tiling_flags; + *tiling_flags = bo->tiling_flags; if (pitch) - *pitch = robj->pitch; + *pitch = bo->pitch; } -int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, - bool force_drop) +int radeon_bo_check_tiling(struct radeon_bo *bo, 
bool has_moved, + bool force_drop) { - if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) + BUG_ON(!atomic_read(&bo->tbo.reserved)); + + if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) return 0; if (force_drop) { - radeon_object_clear_surface_reg(robj); + radeon_bo_clear_surface_reg(bo); return 0; } - if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { + if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { if (!has_moved) return 0; - if (robj->surface_reg >= 0) - radeon_object_clear_surface_reg(robj); + if (bo->surface_reg >= 0) + radeon_bo_clear_surface_reg(bo); return 0; } - if ((robj->surface_reg >= 0) && !has_moved) + if ((bo->surface_reg >= 0) && !has_moved) return 0; - return radeon_object_get_surface_reg(robj); + return radeon_bo_get_surface_reg(bo); } void radeon_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *mem) { - struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); - radeon_object_check_tiling(robj, 0, 1); + struct radeon_bo *rbo; + if (!radeon_ttm_bo_is_radeon_bo(bo)) + return; + rbo = container_of(bo, struct radeon_bo, tbo); + radeon_bo_check_tiling(rbo, 0, 1); } void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { - struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); - radeon_object_check_tiling(robj, 0, 0); + struct radeon_bo *rbo; + if (!radeon_ttm_bo_is_radeon_bo(bo)) + return; + rbo = container_of(bo, struct radeon_bo, tbo); + radeon_bo_check_tiling(rbo, 0, 0); } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 10e8af6bb45..a02f18011ad 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -28,19 +28,146 @@ #ifndef __RADEON_OBJECT_H__ #define __RADEON_OBJECT_H__ -#include <ttm/ttm_bo_api.h> -#include <ttm/ttm_bo_driver.h> -#include <ttm/ttm_placement.h> -#include <ttm/ttm_module.h> +#include <drm/radeon_drm.h> +#include "radeon.h" -/* - * TTM. +/** + * radeon_mem_type_to_domain - return domain corresponding to mem_type + * @mem_type: ttm memory type + * + * Returns corresponding domain of the ttm mem_type + */ +static inline unsigned radeon_mem_type_to_domain(u32 mem_type) +{ + switch (mem_type) { + case TTM_PL_VRAM: + return RADEON_GEM_DOMAIN_VRAM; + case TTM_PL_TT: + return RADEON_GEM_DOMAIN_GTT; + case TTM_PL_SYSTEM: + return RADEON_GEM_DOMAIN_CPU; + default: + break; + } + return 0; +} + +/** + * radeon_bo_reserve - reserve bo + * @bo: bo structure + * @no_wait: don't sleep while trying to reserve (return -EBUSY) + * + * Returns: + * -EBUSY: buffer is busy and @no_wait is true + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. */ -struct radeon_mman { - struct ttm_bo_global_ref bo_global_ref; - struct ttm_global_reference mem_global_ref; - bool mem_global_referenced; - struct ttm_bo_device bdev; -}; +static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) +{ + int r; + + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(bo->rdev->dev, "%p reserve failed\n", bo); + return r; + } + return 0; +} + +static inline void radeon_bo_unreserve(struct radeon_bo *bo) +{ + ttm_bo_unreserve(&bo->tbo); +} + +/** + * radeon_bo_gpu_offset - return GPU offset of bo + * @bo: radeon object for which we query the offset + * + * Returns current GPU offset of the object. 
+ * + * Note: object should either be pinned or reserved when calling this + * function, it might be usefull to add check for this for debugging. + */ +static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) +{ + return bo->tbo.offset; +} + +static inline unsigned long radeon_bo_size(struct radeon_bo *bo) +{ + return bo->tbo.num_pages << PAGE_SHIFT; +} + +static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) +{ + return !!atomic_read(&bo->tbo.reserved); +} + +/** + * radeon_bo_mmap_offset - return mmap offset of bo + * @bo: radeon object for which we query the offset + * + * Returns mmap offset of the object. + * + * Note: addr_space_offset is constant after ttm bo init thus isn't protected + * by any lock. + */ +static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) +{ + return bo->tbo.addr_space_offset; +} + +static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, + bool no_wait) +{ + int r; + + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo); + return r; + } + spin_lock(&bo->tbo.lock); + if (mem_type) + *mem_type = bo->tbo.mem.mem_type; + if (bo->tbo.sync_obj) + r = ttm_bo_wait(&bo->tbo, true, true, no_wait); + spin_unlock(&bo->tbo.lock); + ttm_bo_unreserve(&bo->tbo); + return r; +} +extern int radeon_bo_create(struct radeon_device *rdev, + struct drm_gem_object *gobj, unsigned long size, + bool kernel, u32 domain, + struct radeon_bo **bo_ptr); +extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); +extern void radeon_bo_kunmap(struct radeon_bo *bo); +extern void radeon_bo_unref(struct radeon_bo **bo); +extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); +extern int radeon_bo_unpin(struct radeon_bo *bo); +extern int radeon_bo_evict_vram(struct radeon_device *rdev); +extern void radeon_bo_force_delete(struct radeon_device *rdev); +extern int radeon_bo_init(struct radeon_device *rdev); +extern void radeon_bo_fini(struct radeon_device *rdev); +extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, + struct list_head *head); +extern int radeon_bo_list_reserve(struct list_head *head); +extern void radeon_bo_list_unreserve(struct list_head *head); +extern int radeon_bo_list_validate(struct list_head *head, void *fence); +extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); +extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, + struct vm_area_struct *vma); +extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, + u32 tiling_flags, u32 pitch); +extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, + u32 *tiling_flags, u32 *pitch); +extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, + bool force_drop); +extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); +extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); #endif diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 46146c6a2a0..8bce64cdc32 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev); int radeon_pm_init(struct radeon_device *rdev) { if (radeon_debugfs_pm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for CP !\n"); + DRM_ERROR("Failed to register debugfs file for PM!\n"); } return 0; @@ -44,8 +44,11 @@ static int 
radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); - seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); + seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); + seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); + if (rdev->asic->get_memory_clock) + seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 29ab75903ec..6d0a009dd4a 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -887,6 +887,7 @@ # define RADEON_FP_PANEL_FORMAT (1 << 3) # define RADEON_FP_EN_TMDS (1 << 7) # define RADEON_FP_DETECT_SENSE (1 << 8) +# define RADEON_FP_DETECT_INT_POL (1 << 9) # define R200_FP_SOURCE_SEL_MASK (3 << 10) # define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) @@ -894,6 +895,7 @@ # define R200_FP_SOURCE_SEL_TRANS (3 << 10) # define RADEON_FP_SEL_CRTC1 (0 << 13) # define RADEON_FP_SEL_CRTC2 (1 << 13) +# define R300_HPD_SEL(x) ((x) << 13) # define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) # define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) # define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) @@ -909,6 +911,7 @@ # define RADEON_FP2_ON (1 << 2) # define RADEON_FP2_PANEL_FORMAT (1 << 3) # define RADEON_FP2_DETECT_SENSE (1 << 8) +# define RADEON_FP2_DETECT_INT_POL (1 << 9) # define R200_FP2_SOURCE_SEL_MASK (3 << 10) # define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) # define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) @@ -988,14 +991,20 @@ #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_FP_DETECT_MASK (1 << 4) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) +# define RADEON_FP2_DETECT_MASK (1 << 10) # define RADEON_SW_INT_ENABLE (1 << 25) #define RADEON_GEN_INT_STATUS 0x0044 # define AVIVO_DISPLAY_INT_STATUS (1 << 0) # define RADEON_CRTC_VBLANK_STAT (1 << 0) # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_FP_DETECT_STAT (1 << 4) +# define RADEON_FP_DETECT_STAT_ACK (1 << 4) # define RADEON_CRTC2_VBLANK_STAT (1 << 9) # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) +# define RADEON_FP2_DETECT_STAT (1 << 10) +# define RADEON_FP2_DETECT_STAT_ACK (1 << 10) # define RADEON_SW_INT_FIRE (1 << 26) # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) @@ -1051,20 +1060,25 @@ /* Multimedia I2C bus */ #define RADEON_I2C_CNTL_0 0x0090 -#define RADEON_I2C_DONE (1<<0) -#define RADEON_I2C_NACK (1<<1) -#define RADEON_I2C_HALT (1<<2) -#define RADEON_I2C_SOFT_RST (1<<5) -#define RADEON_I2C_DRIVE_EN (1<<6) -#define RADEON_I2C_DRIVE_SEL (1<<7) -#define RADEON_I2C_START (1<<8) -#define RADEON_I2C_STOP (1<<9) -#define RADEON_I2C_RECEIVE (1<<10) -#define RADEON_I2C_ABORT (1<<11) -#define RADEON_I2C_GO (1<<12) +#define RADEON_I2C_DONE (1 << 0) +#define RADEON_I2C_NACK (1 << 1) +#define RADEON_I2C_HALT (1 << 2) +#define RADEON_I2C_SOFT_RST (1 << 5) +#define RADEON_I2C_DRIVE_EN (1 << 6) +#define RADEON_I2C_DRIVE_SEL (1 << 7) +#define RADEON_I2C_START (1 << 8) +#define RADEON_I2C_STOP (1 << 9) +#define RADEON_I2C_RECEIVE (1 << 10) +#define RADEON_I2C_ABORT (1 << 11) +#define RADEON_I2C_GO (1 << 12) +#define RADEON_I2C_PRESCALE_SHIFT 16 #define 
RADEON_I2C_CNTL_1 0x0094 -#define RADEON_I2C_SEL (1<<16) -#define RADEON_I2C_EN (1<<17) +#define RADEON_I2C_DATA_COUNT_SHIFT 0 +#define RADEON_I2C_ADDR_COUNT_SHIFT 4 +#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 +#define RADEON_I2C_SEL (1 << 16) +#define RADEON_I2C_EN (1 << 17) +#define RADEON_I2C_TIME_LIMIT_SHIFT 24 #define RADEON_I2C_DATA 0x0098 #define RADEON_DVI_I2C_CNTL_0 0x02e0 @@ -1072,7 +1086,7 @@ # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ -#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ +#define RADEON_DVI_I2C_CNTL_1 0x02e4 #define RADEON_DVI_I2C_DATA 0x02e8 #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ @@ -1143,15 +1157,16 @@ # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) -#define RADEON_LCD_GPIO_MASK 0x01a0 -#define RADEON_GPIOPAD_EN 0x01a0 -#define RADEON_LCD_GPIO_Y_REG 0x01a4 -#define RADEON_MDGPIO_A_REG 0x01ac -#define RADEON_MDGPIO_EN_REG 0x01b0 -#define RADEON_MDGPIO_MASK 0x0198 + #define RADEON_GPIOPAD_MASK 0x0198 #define RADEON_GPIOPAD_A 0x019c -#define RADEON_MDGPIO_Y_REG 0x01b4 +#define RADEON_GPIOPAD_EN 0x01a0 +#define RADEON_GPIOPAD_Y 0x01a4 +#define RADEON_MDGPIO_MASK 0x01a8 +#define RADEON_MDGPIO_A 0x01ac +#define RADEON_MDGPIO_EN 0x01b0 +#define RADEON_MDGPIO_Y 0x01b4 + #define RADEON_MEM_ADDR_CONFIG 0x0148 #define RADEON_MEM_BASE 0x0f10 /* PCI */ #define RADEON_MEM_CNTL 0x0140 @@ -1360,6 +1375,9 @@ #define RADEON_OVR_CLR 0x0230 #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 +#define RADEON_OVR2_CLR 0x0330 +#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334 +#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338 /* first capture unit */ diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 747b4bffb84..4d12b2d17b4 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev) return 0; /* Allocate 1M object buffer */ INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); - r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, - true, RADEON_GEM_DOMAIN_GTT, - false, &rdev->ib_pool.robj); + r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, + true, RADEON_GEM_DOMAIN_GTT, + &rdev->ib_pool.robj); if (r) { DRM_ERROR("radeon: failed to ib pool (%d).\n", r); return r; } - r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); + r = radeon_bo_reserve(rdev->ib_pool.robj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); if (r) { + radeon_bo_unreserve(rdev->ib_pool.robj); DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); return r; } - r = radeon_object_kmap(rdev->ib_pool.robj, &ptr); + r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); + radeon_bo_unreserve(rdev->ib_pool.robj); if (r) { DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); return r; @@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev) void radeon_ib_pool_fini(struct radeon_device *rdev) { + int r; + if (!rdev->ib_pool.ready) { return; } mutex_lock(&rdev->ib_pool.mutex); bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); if (rdev->ib_pool.robj) { - radeon_object_kunmap(rdev->ib_pool.robj); - radeon_object_unref(&rdev->ib_pool.robj); + r = radeon_bo_reserve(rdev->ib_pool.robj, false); + if (likely(r == 0)) { + 
radeon_bo_kunmap(rdev->ib_pool.robj); + radeon_bo_unpin(rdev->ib_pool.robj); + radeon_bo_unreserve(rdev->ib_pool.robj); + } + radeon_bo_unref(&rdev->ib_pool.robj); rdev->ib_pool.robj = NULL; } mutex_unlock(&rdev->ib_pool.mutex); @@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.ring_size = ring_size; /* Allocate ring buffer */ if (rdev->cp.ring_obj == NULL) { - r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, - true, - RADEON_GEM_DOMAIN_GTT, - false, - &rdev->cp.ring_obj); + r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, + RADEON_GEM_DOMAIN_GTT, + &rdev->cp.ring_obj); if (r) { - DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); - mutex_unlock(&rdev->cp.mutex); + dev_err(rdev->dev, "(%d) ring create failed\n", r); return r; } - r = radeon_object_pin(rdev->cp.ring_obj, - RADEON_GEM_DOMAIN_GTT, - &rdev->cp.gpu_addr); + r = radeon_bo_reserve(rdev->cp.ring_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->cp.gpu_addr); if (r) { - DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); - mutex_unlock(&rdev->cp.mutex); + radeon_bo_unreserve(rdev->cp.ring_obj); + dev_err(rdev->dev, "(%d) ring pin failed\n", r); return r; } - r = radeon_object_kmap(rdev->cp.ring_obj, + r = radeon_bo_kmap(rdev->cp.ring_obj, (void **)&rdev->cp.ring); + radeon_bo_unreserve(rdev->cp.ring_obj); if (r) { - DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); - mutex_unlock(&rdev->cp.mutex); + dev_err(rdev->dev, "(%d) ring map failed\n", r); return r; } } @@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) void radeon_ring_fini(struct radeon_device *rdev) { + int r; + mutex_lock(&rdev->cp.mutex); if (rdev->cp.ring_obj) { - radeon_object_kunmap(rdev->cp.ring_obj); - radeon_object_unpin(rdev->cp.ring_obj); - radeon_object_unref(&rdev->cp.ring_obj); + r = radeon_bo_reserve(rdev->cp.ring_obj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->cp.ring_obj); + radeon_bo_unpin(rdev->cp.ring_obj); + radeon_bo_unreserve(rdev->cp.ring_obj); + } + radeon_bo_unref(&rdev->cp.ring_obj); rdev->cp.ring = NULL; rdev->cp.ring_obj = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index f8a465d9a1c..9f5e2f929da 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -30,8 +30,8 @@ /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ void radeon_test_moves(struct radeon_device *rdev) { - struct radeon_object *vram_obj = NULL; - struct radeon_object **gtt_obj = NULL; + struct radeon_bo *vram_obj = NULL; + struct radeon_bo **gtt_obj = NULL; struct radeon_fence *fence = NULL; uint64_t gtt_addr, vram_addr; unsigned i, n, size; @@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev) /* Number of tests = * (Total GTT - IB pool - writeback page - ring buffer) / test size */ - n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - - rdev->cp.ring_size) / size; + n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - + rdev->cp.ring_size)) / size; gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); if (!gtt_obj) { @@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev) goto out_cleanup; } - r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, - false, &vram_obj); + r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, + 
&vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; } - - r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); + r = radeon_bo_reserve(vram_obj, false); + if (unlikely(r != 0)) + goto out_cleanup; + r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); if (r) { DRM_ERROR("Failed to pin VRAM object\n"); goto out_cleanup; } - for (i = 0; i < n; i++) { void *gtt_map, *vram_map; void **gtt_start, **gtt_end; void **vram_start, **vram_end; - r = radeon_object_create(rdev, NULL, size, true, - RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); + r = radeon_bo_create(rdev, NULL, size, true, + RADEON_GEM_DOMAIN_GTT, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_cleanup; } - r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); + r = radeon_bo_reserve(gtt_obj[i], false); + if (unlikely(r != 0)) + goto out_cleanup; + r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); if (r) { DRM_ERROR("Failed to pin GTT object %d\n", i); goto out_cleanup; } - r = radeon_object_kmap(gtt_obj[i], >t_map); + r = radeon_bo_kmap(gtt_obj[i], >t_map); if (r) { DRM_ERROR("Failed to map GTT object %d\n", i); goto out_cleanup; @@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev) gtt_start++) *gtt_start = gtt_start; - radeon_object_kunmap(gtt_obj[i]); + radeon_bo_kunmap(gtt_obj[i]); r = radeon_fence_create(rdev, &fence); if (r) { @@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev) radeon_fence_unref(&fence); - r = radeon_object_kmap(vram_obj, &vram_map); + r = radeon_bo_kmap(vram_obj, &vram_map); if (r) { DRM_ERROR("Failed to map VRAM object after copy %d\n", i); goto out_cleanup; @@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev) "expected 0x%p (GTT map 0x%p-0x%p)\n", i, *vram_start, gtt_start, gtt_map, gtt_end); - radeon_object_kunmap(vram_obj); + radeon_bo_kunmap(vram_obj); goto out_cleanup; } *vram_start = vram_start; } - radeon_object_kunmap(vram_obj); + radeon_bo_kunmap(vram_obj); r = radeon_fence_create(rdev, &fence); if (r) { @@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev) radeon_fence_unref(&fence); - r = radeon_object_kmap(gtt_obj[i], >t_map); + r = radeon_bo_kmap(gtt_obj[i], >t_map); if (r) { DRM_ERROR("Failed to map GTT object after copy %d\n", i); goto out_cleanup; @@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev) "expected 0x%p (VRAM map 0x%p-0x%p)\n", i, *gtt_start, vram_start, vram_map, vram_end); - radeon_object_kunmap(gtt_obj[i]); + radeon_bo_kunmap(gtt_obj[i]); goto out_cleanup; } } - radeon_object_kunmap(gtt_obj[i]); + radeon_bo_kunmap(gtt_obj[i]); DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", gtt_addr - rdev->mc.gtt_location); @@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev) out_cleanup: if (vram_obj) { - radeon_object_unpin(vram_obj); - radeon_object_unref(&vram_obj); + if (radeon_bo_is_reserved(vram_obj)) { + radeon_bo_unpin(vram_obj); + radeon_bo_unreserve(vram_obj); + } + radeon_bo_unref(&vram_obj); } if (gtt_obj) { for (i = 0; i < n; i++) { if (gtt_obj[i]) { - radeon_object_unpin(gtt_obj[i]); - radeon_object_unref(>t_obj[i]); + if (radeon_bo_is_reserved(gtt_obj[i])) { + radeon_bo_unpin(gtt_obj[i]); + radeon_bo_unreserve(gtt_obj[i]); + } + radeon_bo_unref(>t_obj[i]); } } kfree(gtt_obj); @@ -206,4 +216,3 @@ out_cleanup: printk(KERN_WARNING "Error while testing BO move.\n"); } } - diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
b/drivers/gpu/drm/radeon/radeon_ttm.c index eda4ade24c3..58b5adf974c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: - man->gpu_offset = 0; + man->gpu_offset = rdev->mc.gtt_location; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ - man->gpu_offset = 0; + man->gpu_offset = rdev->mc.vram_location; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; @@ -197,16 +197,34 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, return 0; } -static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo) +static void radeon_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) { - uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE; + struct radeon_bo *rbo; + static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!radeon_ttm_bo_is_radeon_bo(bo)) { + placement->fpfn = 0; + placement->lpfn = 0; + placement->placement = &placements; + placement->busy_placement = &placements; + placement->num_placement = 1; + placement->num_busy_placement = 1; + return; + } + rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + if (rbo->rdev->cp.ready == false) + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); + else + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + break; + case TTM_PL_TT: default: - return (cur_placement & ~TTM_PL_MASK_CACHING) | - TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_CACHED; + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); } + *placement = rbo->placement; } static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) @@ -283,14 +301,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; - uint32_t proposed_placement; + u32 placements; + struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem, + placement.fpfn = 0; + placement.lpfn = 0; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); if (unlikely(r)) { return r; @@ -329,15 +354,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; - uint32_t proposed_flags; + struct ttm_placement placement; + u32 placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem, - interruptible, no_wait); + placement.fpfn = 0; + placement.lpfn = 0; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 
1; + placement.busy_placement = &placements; + placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); if (unlikely(r)) { return r; } @@ -407,18 +438,6 @@ memcpy: return r; } -const uint32_t radeon_mem_prios[] = { - TTM_PL_VRAM, - TTM_PL_TT, - TTM_PL_SYSTEM, -}; - -const uint32_t radeon_busy_prios[] = { - TTM_PL_TT, - TTM_PL_VRAM, - TTM_PL_SYSTEM, -}; - static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, bool lazy, bool interruptible) { @@ -446,10 +465,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) } static struct ttm_bo_driver radeon_bo_driver = { - .mem_type_prio = radeon_mem_prios, - .mem_busy_prio = radeon_busy_prios, - .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios), - .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios), .create_ttm_backend_entry = &radeon_create_ttm_backend_entry, .invalidate_caches = &radeon_invalidate_caches, .init_mem_type = &radeon_init_mem_type, @@ -482,27 +497,32 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } - r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, - ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); + rdev->mman.initialized = true; + r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, + rdev->mc.real_vram_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } - r = radeon_object_create(rdev, NULL, 256 * 1024, true, - RADEON_GEM_DOMAIN_VRAM, false, - &rdev->stollen_vga_memory); + r = radeon_bo_create(rdev, NULL, 256 * 1024, true, + RADEON_GEM_DOMAIN_VRAM, + &rdev->stollen_vga_memory); if (r) { return r; } - r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); + r = radeon_bo_reserve(rdev->stollen_vga_memory, false); + if (r) + return r; + r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); + radeon_bo_unreserve(rdev->stollen_vga_memory); if (r) { - radeon_object_unref(&rdev->stollen_vga_memory); + radeon_bo_unref(&rdev->stollen_vga_memory); return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); - r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, - ((rdev->mc.gtt_size) >> PAGE_SHIFT)); + r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, + rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; @@ -523,15 +543,24 @@ int radeon_ttm_init(struct radeon_device *rdev) void radeon_ttm_fini(struct radeon_device *rdev) { + int r; + + if (!rdev->mman.initialized) + return; if (rdev->stollen_vga_memory) { - radeon_object_unpin(rdev->stollen_vga_memory); - radeon_object_unref(&rdev->stollen_vga_memory); + r = radeon_bo_reserve(rdev->stollen_vga_memory, false); + if (r == 0) { + radeon_bo_unpin(rdev->stollen_vga_memory); + radeon_bo_unreserve(rdev->stollen_vga_memory); + } + radeon_bo_unref(&rdev->stollen_vga_memory); } ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); ttm_bo_device_release(&rdev->mman.bdev); radeon_gart_fini(rdev); radeon_ttm_global_fini(rdev); + rdev->mman.initialized = false; DRM_INFO("radeon: ttm finalized\n"); } diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200 index 6021c8849a1..c29ac434ac9 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r200 +++ b/drivers/gpu/drm/radeon/reg_srcs/r200 @@ -91,6 +91,8 @@ r200 0x3294 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 0x22c4 
SE_TCL_POINT_SPRITE_CNTL +0x22d0 SE_PVS_CNTL +0x22d4 SE_PVS_CONST_CNTL 0x2648 RE_POINTSIZE 0x26c0 RE_TOP_LEFT 0x26c4 RE_MISC diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 new file mode 100644 index 00000000000..989f7a02083 --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 @@ -0,0 +1,795 @@ +r420 0x4f60 +0x1434 SRC_Y_X +0x1438 DST_Y_X +0x143C DST_HEIGHT_WIDTH +0x146C DP_GUI_MASTER_CNTL +0x1474 BRUSH_Y_X +0x1478 DP_BRUSH_BKGD_CLR +0x147C DP_BRUSH_FRGD_CLR +0x1480 BRUSH_DATA0 +0x1484 BRUSH_DATA1 +0x1598 DST_WIDTH_HEIGHT +0x15C0 CLR_CMP_CNTL +0x15C4 CLR_CMP_CLR_SRC +0x15C8 CLR_CMP_CLR_DST +0x15CC CLR_CMP_MSK +0x15D8 DP_SRC_FRGD_CLR +0x15DC DP_SRC_BKGD_CLR +0x1600 DST_LINE_START +0x1604 DST_LINE_END +0x1608 DST_LINE_PATCOUNT +0x16C0 DP_CNTL +0x16CC DP_WRITE_MSK +0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR +0x16E8 DEFAULT_SC_BOTTOM_RIGHT +0x16EC SC_TOP_LEFT +0x16F0 SC_BOTTOM_RIGHT +0x16F4 SRC_SC_BOTTOM_RIGHT +0x1714 DSTCACHE_CTLSTAT +0x1720 WAIT_UNTIL +0x172C RBBM_GUICNTL +0x1D98 VAP_VPORT_XSCALE +0x1D9C VAP_VPORT_XOFFSET +0x1DA0 VAP_VPORT_YSCALE +0x1DA4 VAP_VPORT_YOFFSET +0x1DA8 VAP_VPORT_ZSCALE +0x1DAC VAP_VPORT_ZOFFSET +0x2080 VAP_CNTL +0x2090 VAP_OUT_VTX_FMT_0 +0x2094 VAP_OUT_VTX_FMT_1 +0x20B0 VAP_VTE_CNTL +0x2138 VAP_VF_MIN_VTX_INDX +0x2140 VAP_CNTL_STATUS +0x2150 VAP_PROG_STREAM_CNTL_0 +0x2154 VAP_PROG_STREAM_CNTL_1 +0x2158 VAP_PROG_STREAM_CNTL_2 +0x215C VAP_PROG_STREAM_CNTL_3 +0x2160 VAP_PROG_STREAM_CNTL_4 +0x2164 VAP_PROG_STREAM_CNTL_5 +0x2168 VAP_PROG_STREAM_CNTL_6 +0x216C VAP_PROG_STREAM_CNTL_7 +0x2180 VAP_VTX_STATE_CNTL +0x2184 VAP_VSM_VTX_ASSM +0x2188 VAP_VTX_STATE_IND_REG_0 +0x218C VAP_VTX_STATE_IND_REG_1 +0x2190 VAP_VTX_STATE_IND_REG_2 +0x2194 VAP_VTX_STATE_IND_REG_3 +0x2198 VAP_VTX_STATE_IND_REG_4 +0x219C VAP_VTX_STATE_IND_REG_5 +0x21A0 VAP_VTX_STATE_IND_REG_6 +0x21A4 VAP_VTX_STATE_IND_REG_7 +0x21A8 VAP_VTX_STATE_IND_REG_8 +0x21AC VAP_VTX_STATE_IND_REG_9 +0x21B0 VAP_VTX_STATE_IND_REG_10 +0x21B4 VAP_VTX_STATE_IND_REG_11 +0x21B8 VAP_VTX_STATE_IND_REG_12 +0x21BC VAP_VTX_STATE_IND_REG_13 +0x21C0 VAP_VTX_STATE_IND_REG_14 +0x21C4 VAP_VTX_STATE_IND_REG_15 +0x21DC VAP_PSC_SGN_NORM_CNTL +0x21E0 VAP_PROG_STREAM_CNTL_EXT_0 +0x21E4 VAP_PROG_STREAM_CNTL_EXT_1 +0x21E8 VAP_PROG_STREAM_CNTL_EXT_2 +0x21EC VAP_PROG_STREAM_CNTL_EXT_3 +0x21F0 VAP_PROG_STREAM_CNTL_EXT_4 +0x21F4 VAP_PROG_STREAM_CNTL_EXT_5 +0x21F8 VAP_PROG_STREAM_CNTL_EXT_6 +0x21FC VAP_PROG_STREAM_CNTL_EXT_7 +0x2200 VAP_PVS_VECTOR_INDX_REG +0x2204 VAP_PVS_VECTOR_DATA_REG +0x2208 VAP_PVS_VECTOR_DATA_REG_128 +0x221C VAP_CLIP_CNTL +0x2220 VAP_GB_VERT_CLIP_ADJ +0x2224 VAP_GB_VERT_DISC_ADJ +0x2228 VAP_GB_HORZ_CLIP_ADJ +0x222C VAP_GB_HORZ_DISC_ADJ +0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0 +0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1 +0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2 +0x223C VAP_PVS_FLOW_CNTL_ADDRS_3 +0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4 +0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5 +0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6 +0x224C VAP_PVS_FLOW_CNTL_ADDRS_7 +0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8 +0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9 +0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10 +0x225C VAP_PVS_FLOW_CNTL_ADDRS_11 +0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12 +0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13 +0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14 +0x226C VAP_PVS_FLOW_CNTL_ADDRS_15 +0x2284 VAP_PVS_STATE_FLUSH_REG +0x2288 VAP_PVS_VTX_TIMEOUT_REG +0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0 +0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1 +0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2 +0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3 +0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4 +0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5 +0x22A8 
VAP_PVS_FLOW_CNTL_LOOP_INDEX_6 +0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7 +0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8 +0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9 +0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10 +0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11 +0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12 +0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13 +0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14 +0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15 +0x22D0 VAP_PVS_CODE_CNTL_0 +0x22D4 VAP_PVS_CONST_CNTL +0x22D8 VAP_PVS_CODE_CNTL_1 +0x22DC VAP_PVS_FLOW_CNTL_OPC +0x342C RB2D_DSTCACHE_CTLSTAT +0x4000 GB_VAP_RASTER_VTX_FMT_0 +0x4004 GB_VAP_RASTER_VTX_FMT_1 +0x4008 GB_ENABLE +0x401C GB_SELECT +0x4020 GB_AA_CONFIG +0x4024 GB_FIFO_SIZE +0x4100 TX_INVALTAGS +0x4200 GA_POINT_S0 +0x4204 GA_POINT_T0 +0x4208 GA_POINT_S1 +0x420C GA_POINT_T1 +0x4214 GA_TRIANGLE_STIPPLE +0x421C GA_POINT_SIZE +0x4230 GA_POINT_MINMAX +0x4234 GA_LINE_CNTL +0x4238 GA_LINE_STIPPLE_CONFIG +0x4260 GA_LINE_STIPPLE_VALUE +0x4264 GA_LINE_S0 +0x4268 GA_LINE_S1 +0x4278 GA_COLOR_CONTROL +0x427C GA_SOLID_RG +0x4280 GA_SOLID_BA +0x4288 GA_POLY_MODE +0x428C GA_ROUND_MODE +0x4290 GA_OFFSET +0x4294 GA_FOG_SCALE +0x4298 GA_FOG_OFFSET +0x42A0 SU_TEX_WRAP +0x42A4 SU_POLY_OFFSET_FRONT_SCALE +0x42A8 SU_POLY_OFFSET_FRONT_OFFSET +0x42AC SU_POLY_OFFSET_BACK_SCALE +0x42B0 SU_POLY_OFFSET_BACK_OFFSET +0x42B4 SU_POLY_OFFSET_ENABLE +0x42B8 SU_CULL_MODE +0x42C0 SU_DEPTH_SCALE +0x42C4 SU_DEPTH_OFFSET +0x42C8 SU_REG_DEST +0x4300 RS_COUNT +0x4304 RS_INST_COUNT +0x4310 RS_IP_0 +0x4314 RS_IP_1 +0x4318 RS_IP_2 +0x431C RS_IP_3 +0x4320 RS_IP_4 +0x4324 RS_IP_5 +0x4328 RS_IP_6 +0x432C RS_IP_7 +0x4330 RS_INST_0 +0x4334 RS_INST_1 +0x4338 RS_INST_2 +0x433C RS_INST_3 +0x4340 RS_INST_4 +0x4344 RS_INST_5 +0x4348 RS_INST_6 +0x434C RS_INST_7 +0x4350 RS_INST_8 +0x4354 RS_INST_9 +0x4358 RS_INST_10 +0x435C RS_INST_11 +0x4360 RS_INST_12 +0x4364 RS_INST_13 +0x4368 RS_INST_14 +0x436C RS_INST_15 +0x43A4 SC_HYPERZ_EN +0x43A8 SC_EDGERULE +0x43B0 SC_CLIP_0_A +0x43B4 SC_CLIP_0_B +0x43B8 SC_CLIP_1_A +0x43BC SC_CLIP_1_B +0x43C0 SC_CLIP_2_A +0x43C4 SC_CLIP_2_B +0x43C8 SC_CLIP_3_A +0x43CC SC_CLIP_3_B +0x43D0 SC_CLIP_RULE +0x43E0 SC_SCISSOR0 +0x43E8 SC_SCREENDOOR +0x4440 TX_FILTER1_0 +0x4444 TX_FILTER1_1 +0x4448 TX_FILTER1_2 +0x444C TX_FILTER1_3 +0x4450 TX_FILTER1_4 +0x4454 TX_FILTER1_5 +0x4458 TX_FILTER1_6 +0x445C TX_FILTER1_7 +0x4460 TX_FILTER1_8 +0x4464 TX_FILTER1_9 +0x4468 TX_FILTER1_10 +0x446C TX_FILTER1_11 +0x4470 TX_FILTER1_12 +0x4474 TX_FILTER1_13 +0x4478 TX_FILTER1_14 +0x447C TX_FILTER1_15 +0x4580 TX_CHROMA_KEY_0 +0x4584 TX_CHROMA_KEY_1 +0x4588 TX_CHROMA_KEY_2 +0x458C TX_CHROMA_KEY_3 +0x4590 TX_CHROMA_KEY_4 +0x4594 TX_CHROMA_KEY_5 +0x4598 TX_CHROMA_KEY_6 +0x459C TX_CHROMA_KEY_7 +0x45A0 TX_CHROMA_KEY_8 +0x45A4 TX_CHROMA_KEY_9 +0x45A8 TX_CHROMA_KEY_10 +0x45AC TX_CHROMA_KEY_11 +0x45B0 TX_CHROMA_KEY_12 +0x45B4 TX_CHROMA_KEY_13 +0x45B8 TX_CHROMA_KEY_14 +0x45BC TX_CHROMA_KEY_15 +0x45C0 TX_BORDER_COLOR_0 +0x45C4 TX_BORDER_COLOR_1 +0x45C8 TX_BORDER_COLOR_2 +0x45CC TX_BORDER_COLOR_3 +0x45D0 TX_BORDER_COLOR_4 +0x45D4 TX_BORDER_COLOR_5 +0x45D8 TX_BORDER_COLOR_6 +0x45DC TX_BORDER_COLOR_7 +0x45E0 TX_BORDER_COLOR_8 +0x45E4 TX_BORDER_COLOR_9 +0x45E8 TX_BORDER_COLOR_10 +0x45EC TX_BORDER_COLOR_11 +0x45F0 TX_BORDER_COLOR_12 +0x45F4 TX_BORDER_COLOR_13 +0x45F8 TX_BORDER_COLOR_14 +0x45FC TX_BORDER_COLOR_15 +0x4600 US_CONFIG +0x4604 US_PIXSIZE +0x4608 US_CODE_OFFSET +0x460C US_RESET +0x4610 US_CODE_ADDR_0 +0x4614 US_CODE_ADDR_1 +0x4618 US_CODE_ADDR_2 +0x461C US_CODE_ADDR_3 +0x4620 US_TEX_INST_0 +0x4624 US_TEX_INST_1 +0x4628 US_TEX_INST_2 +0x462C US_TEX_INST_3 
+0x4630 US_TEX_INST_4 +0x4634 US_TEX_INST_5 +0x4638 US_TEX_INST_6 +0x463C US_TEX_INST_7 +0x4640 US_TEX_INST_8 +0x4644 US_TEX_INST_9 +0x4648 US_TEX_INST_10 +0x464C US_TEX_INST_11 +0x4650 US_TEX_INST_12 +0x4654 US_TEX_INST_13 +0x4658 US_TEX_INST_14 +0x465C US_TEX_INST_15 +0x4660 US_TEX_INST_16 +0x4664 US_TEX_INST_17 +0x4668 US_TEX_INST_18 +0x466C US_TEX_INST_19 +0x4670 US_TEX_INST_20 +0x4674 US_TEX_INST_21 +0x4678 US_TEX_INST_22 +0x467C US_TEX_INST_23 +0x4680 US_TEX_INST_24 +0x4684 US_TEX_INST_25 +0x4688 US_TEX_INST_26 +0x468C US_TEX_INST_27 +0x4690 US_TEX_INST_28 +0x4694 US_TEX_INST_29 +0x4698 US_TEX_INST_30 +0x469C US_TEX_INST_31 +0x46A4 US_OUT_FMT_0 +0x46A8 US_OUT_FMT_1 +0x46AC US_OUT_FMT_2 +0x46B0 US_OUT_FMT_3 +0x46B4 US_W_FMT +0x46B8 US_CODE_BANK +0x46BC US_CODE_EXT +0x46C0 US_ALU_RGB_ADDR_0 +0x46C4 US_ALU_RGB_ADDR_1 +0x46C8 US_ALU_RGB_ADDR_2 +0x46CC US_ALU_RGB_ADDR_3 +0x46D0 US_ALU_RGB_ADDR_4 +0x46D4 US_ALU_RGB_ADDR_5 +0x46D8 US_ALU_RGB_ADDR_6 +0x46DC US_ALU_RGB_ADDR_7 +0x46E0 US_ALU_RGB_ADDR_8 +0x46E4 US_ALU_RGB_ADDR_9 +0x46E8 US_ALU_RGB_ADDR_10 +0x46EC US_ALU_RGB_ADDR_11 +0x46F0 US_ALU_RGB_ADDR_12 +0x46F4 US_ALU_RGB_ADDR_13 +0x46F8 US_ALU_RGB_ADDR_14 +0x46FC US_ALU_RGB_ADDR_15 +0x4700 US_ALU_RGB_ADDR_16 +0x4704 US_ALU_RGB_ADDR_17 +0x4708 US_ALU_RGB_ADDR_18 +0x470C US_ALU_RGB_ADDR_19 +0x4710 US_ALU_RGB_ADDR_20 +0x4714 US_ALU_RGB_ADDR_21 +0x4718 US_ALU_RGB_ADDR_22 +0x471C US_ALU_RGB_ADDR_23 +0x4720 US_ALU_RGB_ADDR_24 +0x4724 US_ALU_RGB_ADDR_25 +0x4728 US_ALU_RGB_ADDR_26 +0x472C US_ALU_RGB_ADDR_27 +0x4730 US_ALU_RGB_ADDR_28 +0x4734 US_ALU_RGB_ADDR_29 +0x4738 US_ALU_RGB_ADDR_30 +0x473C US_ALU_RGB_ADDR_31 +0x4740 US_ALU_RGB_ADDR_32 +0x4744 US_ALU_RGB_ADDR_33 +0x4748 US_ALU_RGB_ADDR_34 +0x474C US_ALU_RGB_ADDR_35 +0x4750 US_ALU_RGB_ADDR_36 +0x4754 US_ALU_RGB_ADDR_37 +0x4758 US_ALU_RGB_ADDR_38 +0x475C US_ALU_RGB_ADDR_39 +0x4760 US_ALU_RGB_ADDR_40 +0x4764 US_ALU_RGB_ADDR_41 +0x4768 US_ALU_RGB_ADDR_42 +0x476C US_ALU_RGB_ADDR_43 +0x4770 US_ALU_RGB_ADDR_44 +0x4774 US_ALU_RGB_ADDR_45 +0x4778 US_ALU_RGB_ADDR_46 +0x477C US_ALU_RGB_ADDR_47 +0x4780 US_ALU_RGB_ADDR_48 +0x4784 US_ALU_RGB_ADDR_49 +0x4788 US_ALU_RGB_ADDR_50 +0x478C US_ALU_RGB_ADDR_51 +0x4790 US_ALU_RGB_ADDR_52 +0x4794 US_ALU_RGB_ADDR_53 +0x4798 US_ALU_RGB_ADDR_54 +0x479C US_ALU_RGB_ADDR_55 +0x47A0 US_ALU_RGB_ADDR_56 +0x47A4 US_ALU_RGB_ADDR_57 +0x47A8 US_ALU_RGB_ADDR_58 +0x47AC US_ALU_RGB_ADDR_59 +0x47B0 US_ALU_RGB_ADDR_60 +0x47B4 US_ALU_RGB_ADDR_61 +0x47B8 US_ALU_RGB_ADDR_62 +0x47BC US_ALU_RGB_ADDR_63 +0x47C0 US_ALU_ALPHA_ADDR_0 +0x47C4 US_ALU_ALPHA_ADDR_1 +0x47C8 US_ALU_ALPHA_ADDR_2 +0x47CC US_ALU_ALPHA_ADDR_3 +0x47D0 US_ALU_ALPHA_ADDR_4 +0x47D4 US_ALU_ALPHA_ADDR_5 +0x47D8 US_ALU_ALPHA_ADDR_6 +0x47DC US_ALU_ALPHA_ADDR_7 +0x47E0 US_ALU_ALPHA_ADDR_8 +0x47E4 US_ALU_ALPHA_ADDR_9 +0x47E8 US_ALU_ALPHA_ADDR_10 +0x47EC US_ALU_ALPHA_ADDR_11 +0x47F0 US_ALU_ALPHA_ADDR_12 +0x47F4 US_ALU_ALPHA_ADDR_13 +0x47F8 US_ALU_ALPHA_ADDR_14 +0x47FC US_ALU_ALPHA_ADDR_15 +0x4800 US_ALU_ALPHA_ADDR_16 +0x4804 US_ALU_ALPHA_ADDR_17 +0x4808 US_ALU_ALPHA_ADDR_18 +0x480C US_ALU_ALPHA_ADDR_19 +0x4810 US_ALU_ALPHA_ADDR_20 +0x4814 US_ALU_ALPHA_ADDR_21 +0x4818 US_ALU_ALPHA_ADDR_22 +0x481C US_ALU_ALPHA_ADDR_23 +0x4820 US_ALU_ALPHA_ADDR_24 +0x4824 US_ALU_ALPHA_ADDR_25 +0x4828 US_ALU_ALPHA_ADDR_26 +0x482C US_ALU_ALPHA_ADDR_27 +0x4830 US_ALU_ALPHA_ADDR_28 +0x4834 US_ALU_ALPHA_ADDR_29 +0x4838 US_ALU_ALPHA_ADDR_30 +0x483C US_ALU_ALPHA_ADDR_31 +0x4840 US_ALU_ALPHA_ADDR_32 +0x4844 US_ALU_ALPHA_ADDR_33 +0x4848 US_ALU_ALPHA_ADDR_34 +0x484C US_ALU_ALPHA_ADDR_35 +0x4850 
US_ALU_ALPHA_ADDR_36 +0x4854 US_ALU_ALPHA_ADDR_37 +0x4858 US_ALU_ALPHA_ADDR_38 +0x485C US_ALU_ALPHA_ADDR_39 +0x4860 US_ALU_ALPHA_ADDR_40 +0x4864 US_ALU_ALPHA_ADDR_41 +0x4868 US_ALU_ALPHA_ADDR_42 +0x486C US_ALU_ALPHA_ADDR_43 +0x4870 US_ALU_ALPHA_ADDR_44 +0x4874 US_ALU_ALPHA_ADDR_45 +0x4878 US_ALU_ALPHA_ADDR_46 +0x487C US_ALU_ALPHA_ADDR_47 +0x4880 US_ALU_ALPHA_ADDR_48 +0x4884 US_ALU_ALPHA_ADDR_49 +0x4888 US_ALU_ALPHA_ADDR_50 +0x488C US_ALU_ALPHA_ADDR_51 +0x4890 US_ALU_ALPHA_ADDR_52 +0x4894 US_ALU_ALPHA_ADDR_53 +0x4898 US_ALU_ALPHA_ADDR_54 +0x489C US_ALU_ALPHA_ADDR_55 +0x48A0 US_ALU_ALPHA_ADDR_56 +0x48A4 US_ALU_ALPHA_ADDR_57 +0x48A8 US_ALU_ALPHA_ADDR_58 +0x48AC US_ALU_ALPHA_ADDR_59 +0x48B0 US_ALU_ALPHA_ADDR_60 +0x48B4 US_ALU_ALPHA_ADDR_61 +0x48B8 US_ALU_ALPHA_ADDR_62 +0x48BC US_ALU_ALPHA_ADDR_63 +0x48C0 US_ALU_RGB_INST_0 +0x48C4 US_ALU_RGB_INST_1 +0x48C8 US_ALU_RGB_INST_2 +0x48CC US_ALU_RGB_INST_3 +0x48D0 US_ALU_RGB_INST_4 +0x48D4 US_ALU_RGB_INST_5 +0x48D8 US_ALU_RGB_INST_6 +0x48DC US_ALU_RGB_INST_7 +0x48E0 US_ALU_RGB_INST_8 +0x48E4 US_ALU_RGB_INST_9 +0x48E8 US_ALU_RGB_INST_10 +0x48EC US_ALU_RGB_INST_11 +0x48F0 US_ALU_RGB_INST_12 +0x48F4 US_ALU_RGB_INST_13 +0x48F8 US_ALU_RGB_INST_14 +0x48FC US_ALU_RGB_INST_15 +0x4900 US_ALU_RGB_INST_16 +0x4904 US_ALU_RGB_INST_17 +0x4908 US_ALU_RGB_INST_18 +0x490C US_ALU_RGB_INST_19 +0x4910 US_ALU_RGB_INST_20 +0x4914 US_ALU_RGB_INST_21 +0x4918 US_ALU_RGB_INST_22 +0x491C US_ALU_RGB_INST_23 +0x4920 US_ALU_RGB_INST_24 +0x4924 US_ALU_RGB_INST_25 +0x4928 US_ALU_RGB_INST_26 +0x492C US_ALU_RGB_INST_27 +0x4930 US_ALU_RGB_INST_28 +0x4934 US_ALU_RGB_INST_29 +0x4938 US_ALU_RGB_INST_30 +0x493C US_ALU_RGB_INST_31 +0x4940 US_ALU_RGB_INST_32 +0x4944 US_ALU_RGB_INST_33 +0x4948 US_ALU_RGB_INST_34 +0x494C US_ALU_RGB_INST_35 +0x4950 US_ALU_RGB_INST_36 +0x4954 US_ALU_RGB_INST_37 +0x4958 US_ALU_RGB_INST_38 +0x495C US_ALU_RGB_INST_39 +0x4960 US_ALU_RGB_INST_40 +0x4964 US_ALU_RGB_INST_41 +0x4968 US_ALU_RGB_INST_42 +0x496C US_ALU_RGB_INST_43 +0x4970 US_ALU_RGB_INST_44 +0x4974 US_ALU_RGB_INST_45 +0x4978 US_ALU_RGB_INST_46 +0x497C US_ALU_RGB_INST_47 +0x4980 US_ALU_RGB_INST_48 +0x4984 US_ALU_RGB_INST_49 +0x4988 US_ALU_RGB_INST_50 +0x498C US_ALU_RGB_INST_51 +0x4990 US_ALU_RGB_INST_52 +0x4994 US_ALU_RGB_INST_53 +0x4998 US_ALU_RGB_INST_54 +0x499C US_ALU_RGB_INST_55 +0x49A0 US_ALU_RGB_INST_56 +0x49A4 US_ALU_RGB_INST_57 +0x49A8 US_ALU_RGB_INST_58 +0x49AC US_ALU_RGB_INST_59 +0x49B0 US_ALU_RGB_INST_60 +0x49B4 US_ALU_RGB_INST_61 +0x49B8 US_ALU_RGB_INST_62 +0x49BC US_ALU_RGB_INST_63 +0x49C0 US_ALU_ALPHA_INST_0 +0x49C4 US_ALU_ALPHA_INST_1 +0x49C8 US_ALU_ALPHA_INST_2 +0x49CC US_ALU_ALPHA_INST_3 +0x49D0 US_ALU_ALPHA_INST_4 +0x49D4 US_ALU_ALPHA_INST_5 +0x49D8 US_ALU_ALPHA_INST_6 +0x49DC US_ALU_ALPHA_INST_7 +0x49E0 US_ALU_ALPHA_INST_8 +0x49E4 US_ALU_ALPHA_INST_9 +0x49E8 US_ALU_ALPHA_INST_10 +0x49EC US_ALU_ALPHA_INST_11 +0x49F0 US_ALU_ALPHA_INST_12 +0x49F4 US_ALU_ALPHA_INST_13 +0x49F8 US_ALU_ALPHA_INST_14 +0x49FC US_ALU_ALPHA_INST_15 +0x4A00 US_ALU_ALPHA_INST_16 +0x4A04 US_ALU_ALPHA_INST_17 +0x4A08 US_ALU_ALPHA_INST_18 +0x4A0C US_ALU_ALPHA_INST_19 +0x4A10 US_ALU_ALPHA_INST_20 +0x4A14 US_ALU_ALPHA_INST_21 +0x4A18 US_ALU_ALPHA_INST_22 +0x4A1C US_ALU_ALPHA_INST_23 +0x4A20 US_ALU_ALPHA_INST_24 +0x4A24 US_ALU_ALPHA_INST_25 +0x4A28 US_ALU_ALPHA_INST_26 +0x4A2C US_ALU_ALPHA_INST_27 +0x4A30 US_ALU_ALPHA_INST_28 +0x4A34 US_ALU_ALPHA_INST_29 +0x4A38 US_ALU_ALPHA_INST_30 +0x4A3C US_ALU_ALPHA_INST_31 +0x4A40 US_ALU_ALPHA_INST_32 +0x4A44 US_ALU_ALPHA_INST_33 +0x4A48 US_ALU_ALPHA_INST_34 +0x4A4C 
US_ALU_ALPHA_INST_35 +0x4A50 US_ALU_ALPHA_INST_36 +0x4A54 US_ALU_ALPHA_INST_37 +0x4A58 US_ALU_ALPHA_INST_38 +0x4A5C US_ALU_ALPHA_INST_39 +0x4A60 US_ALU_ALPHA_INST_40 +0x4A64 US_ALU_ALPHA_INST_41 +0x4A68 US_ALU_ALPHA_INST_42 +0x4A6C US_ALU_ALPHA_INST_43 +0x4A70 US_ALU_ALPHA_INST_44 +0x4A74 US_ALU_ALPHA_INST_45 +0x4A78 US_ALU_ALPHA_INST_46 +0x4A7C US_ALU_ALPHA_INST_47 +0x4A80 US_ALU_ALPHA_INST_48 +0x4A84 US_ALU_ALPHA_INST_49 +0x4A88 US_ALU_ALPHA_INST_50 +0x4A8C US_ALU_ALPHA_INST_51 +0x4A90 US_ALU_ALPHA_INST_52 +0x4A94 US_ALU_ALPHA_INST_53 +0x4A98 US_ALU_ALPHA_INST_54 +0x4A9C US_ALU_ALPHA_INST_55 +0x4AA0 US_ALU_ALPHA_INST_56 +0x4AA4 US_ALU_ALPHA_INST_57 +0x4AA8 US_ALU_ALPHA_INST_58 +0x4AAC US_ALU_ALPHA_INST_59 +0x4AB0 US_ALU_ALPHA_INST_60 +0x4AB4 US_ALU_ALPHA_INST_61 +0x4AB8 US_ALU_ALPHA_INST_62 +0x4ABC US_ALU_ALPHA_INST_63 +0x4AC0 US_ALU_EXT_ADDR_0 +0x4AC4 US_ALU_EXT_ADDR_1 +0x4AC8 US_ALU_EXT_ADDR_2 +0x4ACC US_ALU_EXT_ADDR_3 +0x4AD0 US_ALU_EXT_ADDR_4 +0x4AD4 US_ALU_EXT_ADDR_5 +0x4AD8 US_ALU_EXT_ADDR_6 +0x4ADC US_ALU_EXT_ADDR_7 +0x4AE0 US_ALU_EXT_ADDR_8 +0x4AE4 US_ALU_EXT_ADDR_9 +0x4AE8 US_ALU_EXT_ADDR_10 +0x4AEC US_ALU_EXT_ADDR_11 +0x4AF0 US_ALU_EXT_ADDR_12 +0x4AF4 US_ALU_EXT_ADDR_13 +0x4AF8 US_ALU_EXT_ADDR_14 +0x4AFC US_ALU_EXT_ADDR_15 +0x4B00 US_ALU_EXT_ADDR_16 +0x4B04 US_ALU_EXT_ADDR_17 +0x4B08 US_ALU_EXT_ADDR_18 +0x4B0C US_ALU_EXT_ADDR_19 +0x4B10 US_ALU_EXT_ADDR_20 +0x4B14 US_ALU_EXT_ADDR_21 +0x4B18 US_ALU_EXT_ADDR_22 +0x4B1C US_ALU_EXT_ADDR_23 +0x4B20 US_ALU_EXT_ADDR_24 +0x4B24 US_ALU_EXT_ADDR_25 +0x4B28 US_ALU_EXT_ADDR_26 +0x4B2C US_ALU_EXT_ADDR_27 +0x4B30 US_ALU_EXT_ADDR_28 +0x4B34 US_ALU_EXT_ADDR_29 +0x4B38 US_ALU_EXT_ADDR_30 +0x4B3C US_ALU_EXT_ADDR_31 +0x4B40 US_ALU_EXT_ADDR_32 +0x4B44 US_ALU_EXT_ADDR_33 +0x4B48 US_ALU_EXT_ADDR_34 +0x4B4C US_ALU_EXT_ADDR_35 +0x4B50 US_ALU_EXT_ADDR_36 +0x4B54 US_ALU_EXT_ADDR_37 +0x4B58 US_ALU_EXT_ADDR_38 +0x4B5C US_ALU_EXT_ADDR_39 +0x4B60 US_ALU_EXT_ADDR_40 +0x4B64 US_ALU_EXT_ADDR_41 +0x4B68 US_ALU_EXT_ADDR_42 +0x4B6C US_ALU_EXT_ADDR_43 +0x4B70 US_ALU_EXT_ADDR_44 +0x4B74 US_ALU_EXT_ADDR_45 +0x4B78 US_ALU_EXT_ADDR_46 +0x4B7C US_ALU_EXT_ADDR_47 +0x4B80 US_ALU_EXT_ADDR_48 +0x4B84 US_ALU_EXT_ADDR_49 +0x4B88 US_ALU_EXT_ADDR_50 +0x4B8C US_ALU_EXT_ADDR_51 +0x4B90 US_ALU_EXT_ADDR_52 +0x4B94 US_ALU_EXT_ADDR_53 +0x4B98 US_ALU_EXT_ADDR_54 +0x4B9C US_ALU_EXT_ADDR_55 +0x4BA0 US_ALU_EXT_ADDR_56 +0x4BA4 US_ALU_EXT_ADDR_57 +0x4BA8 US_ALU_EXT_ADDR_58 +0x4BAC US_ALU_EXT_ADDR_59 +0x4BB0 US_ALU_EXT_ADDR_60 +0x4BB4 US_ALU_EXT_ADDR_61 +0x4BB8 US_ALU_EXT_ADDR_62 +0x4BBC US_ALU_EXT_ADDR_63 +0x4BC0 FG_FOG_BLEND +0x4BC4 FG_FOG_FACTOR +0x4BC8 FG_FOG_COLOR_R +0x4BCC FG_FOG_COLOR_G +0x4BD0 FG_FOG_COLOR_B +0x4BD4 FG_ALPHA_FUNC +0x4BD8 FG_DEPTH_SRC +0x4C00 US_ALU_CONST_R_0 +0x4C04 US_ALU_CONST_G_0 +0x4C08 US_ALU_CONST_B_0 +0x4C0C US_ALU_CONST_A_0 +0x4C10 US_ALU_CONST_R_1 +0x4C14 US_ALU_CONST_G_1 +0x4C18 US_ALU_CONST_B_1 +0x4C1C US_ALU_CONST_A_1 +0x4C20 US_ALU_CONST_R_2 +0x4C24 US_ALU_CONST_G_2 +0x4C28 US_ALU_CONST_B_2 +0x4C2C US_ALU_CONST_A_2 +0x4C30 US_ALU_CONST_R_3 +0x4C34 US_ALU_CONST_G_3 +0x4C38 US_ALU_CONST_B_3 +0x4C3C US_ALU_CONST_A_3 +0x4C40 US_ALU_CONST_R_4 +0x4C44 US_ALU_CONST_G_4 +0x4C48 US_ALU_CONST_B_4 +0x4C4C US_ALU_CONST_A_4 +0x4C50 US_ALU_CONST_R_5 +0x4C54 US_ALU_CONST_G_5 +0x4C58 US_ALU_CONST_B_5 +0x4C5C US_ALU_CONST_A_5 +0x4C60 US_ALU_CONST_R_6 +0x4C64 US_ALU_CONST_G_6 +0x4C68 US_ALU_CONST_B_6 +0x4C6C US_ALU_CONST_A_6 +0x4C70 US_ALU_CONST_R_7 +0x4C74 US_ALU_CONST_G_7 +0x4C78 US_ALU_CONST_B_7 +0x4C7C US_ALU_CONST_A_7 +0x4C80 US_ALU_CONST_R_8 +0x4C84 
US_ALU_CONST_G_8 +0x4C88 US_ALU_CONST_B_8 +0x4C8C US_ALU_CONST_A_8 +0x4C90 US_ALU_CONST_R_9 +0x4C94 US_ALU_CONST_G_9 +0x4C98 US_ALU_CONST_B_9 +0x4C9C US_ALU_CONST_A_9 +0x4CA0 US_ALU_CONST_R_10 +0x4CA4 US_ALU_CONST_G_10 +0x4CA8 US_ALU_CONST_B_10 +0x4CAC US_ALU_CONST_A_10 +0x4CB0 US_ALU_CONST_R_11 +0x4CB4 US_ALU_CONST_G_11 +0x4CB8 US_ALU_CONST_B_11 +0x4CBC US_ALU_CONST_A_11 +0x4CC0 US_ALU_CONST_R_12 +0x4CC4 US_ALU_CONST_G_12 +0x4CC8 US_ALU_CONST_B_12 +0x4CCC US_ALU_CONST_A_12 +0x4CD0 US_ALU_CONST_R_13 +0x4CD4 US_ALU_CONST_G_13 +0x4CD8 US_ALU_CONST_B_13 +0x4CDC US_ALU_CONST_A_13 +0x4CE0 US_ALU_CONST_R_14 +0x4CE4 US_ALU_CONST_G_14 +0x4CE8 US_ALU_CONST_B_14 +0x4CEC US_ALU_CONST_A_14 +0x4CF0 US_ALU_CONST_R_15 +0x4CF4 US_ALU_CONST_G_15 +0x4CF8 US_ALU_CONST_B_15 +0x4CFC US_ALU_CONST_A_15 +0x4D00 US_ALU_CONST_R_16 +0x4D04 US_ALU_CONST_G_16 +0x4D08 US_ALU_CONST_B_16 +0x4D0C US_ALU_CONST_A_16 +0x4D10 US_ALU_CONST_R_17 +0x4D14 US_ALU_CONST_G_17 +0x4D18 US_ALU_CONST_B_17 +0x4D1C US_ALU_CONST_A_17 +0x4D20 US_ALU_CONST_R_18 +0x4D24 US_ALU_CONST_G_18 +0x4D28 US_ALU_CONST_B_18 +0x4D2C US_ALU_CONST_A_18 +0x4D30 US_ALU_CONST_R_19 +0x4D34 US_ALU_CONST_G_19 +0x4D38 US_ALU_CONST_B_19 +0x4D3C US_ALU_CONST_A_19 +0x4D40 US_ALU_CONST_R_20 +0x4D44 US_ALU_CONST_G_20 +0x4D48 US_ALU_CONST_B_20 +0x4D4C US_ALU_CONST_A_20 +0x4D50 US_ALU_CONST_R_21 +0x4D54 US_ALU_CONST_G_21 +0x4D58 US_ALU_CONST_B_21 +0x4D5C US_ALU_CONST_A_21 +0x4D60 US_ALU_CONST_R_22 +0x4D64 US_ALU_CONST_G_22 +0x4D68 US_ALU_CONST_B_22 +0x4D6C US_ALU_CONST_A_22 +0x4D70 US_ALU_CONST_R_23 +0x4D74 US_ALU_CONST_G_23 +0x4D78 US_ALU_CONST_B_23 +0x4D7C US_ALU_CONST_A_23 +0x4D80 US_ALU_CONST_R_24 +0x4D84 US_ALU_CONST_G_24 +0x4D88 US_ALU_CONST_B_24 +0x4D8C US_ALU_CONST_A_24 +0x4D90 US_ALU_CONST_R_25 +0x4D94 US_ALU_CONST_G_25 +0x4D98 US_ALU_CONST_B_25 +0x4D9C US_ALU_CONST_A_25 +0x4DA0 US_ALU_CONST_R_26 +0x4DA4 US_ALU_CONST_G_26 +0x4DA8 US_ALU_CONST_B_26 +0x4DAC US_ALU_CONST_A_26 +0x4DB0 US_ALU_CONST_R_27 +0x4DB4 US_ALU_CONST_G_27 +0x4DB8 US_ALU_CONST_B_27 +0x4DBC US_ALU_CONST_A_27 +0x4DC0 US_ALU_CONST_R_28 +0x4DC4 US_ALU_CONST_G_28 +0x4DC8 US_ALU_CONST_B_28 +0x4DCC US_ALU_CONST_A_28 +0x4DD0 US_ALU_CONST_R_29 +0x4DD4 US_ALU_CONST_G_29 +0x4DD8 US_ALU_CONST_B_29 +0x4DDC US_ALU_CONST_A_29 +0x4DE0 US_ALU_CONST_R_30 +0x4DE4 US_ALU_CONST_G_30 +0x4DE8 US_ALU_CONST_B_30 +0x4DEC US_ALU_CONST_A_30 +0x4DF0 US_ALU_CONST_R_31 +0x4DF4 US_ALU_CONST_G_31 +0x4DF8 US_ALU_CONST_B_31 +0x4DFC US_ALU_CONST_A_31 +0x4E04 RB3D_BLENDCNTL_R3 +0x4E08 RB3D_ABLENDCNTL_R3 +0x4E0C RB3D_COLOR_CHANNEL_MASK +0x4E10 RB3D_CONSTANT_COLOR +0x4E14 RB3D_COLOR_CLEAR_VALUE +0x4E18 RB3D_ROPCNTL_R3 +0x4E1C RB3D_CLRCMP_FLIPE_R3 +0x4E20 RB3D_CLRCMP_CLR_R3 +0x4E24 RB3D_CLRCMP_MSK_R3 +0x4E48 RB3D_DEBUG_CTL +0x4E4C RB3D_DSTCACHE_CTLSTAT_R3 +0x4E50 RB3D_DITHER_CTL +0x4E54 RB3D_CMASK_OFFSET0 +0x4E58 RB3D_CMASK_OFFSET1 +0x4E5C RB3D_CMASK_OFFSET2 +0x4E60 RB3D_CMASK_OFFSET3 +0x4E64 RB3D_CMASK_PITCH0 +0x4E68 RB3D_CMASK_PITCH1 +0x4E6C RB3D_CMASK_PITCH2 +0x4E70 RB3D_CMASK_PITCH3 +0x4E74 RB3D_CMASK_WRINDEX +0x4E78 RB3D_CMASK_DWORD +0x4E7C RB3D_CMASK_RDINDEX +0x4E80 RB3D_AARESOLVE_OFFSET +0x4E84 RB3D_AARESOLVE_PITCH +0x4E88 RB3D_AARESOLVE_CTL +0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD +0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD +0x4F04 ZB_ZSTENCILCNTL +0x4F08 ZB_STENCILREFMASK +0x4F14 ZB_ZTOP +0x4F18 ZB_ZCACHE_CTLSTAT +0x4F1C ZB_BW_CNTL +0x4F28 ZB_DEPTHCLEARVALUE +0x4F30 ZB_ZMASK_OFFSET +0x4F34 ZB_ZMASK_PITCH +0x4F38 ZB_ZMASK_WRINDEX +0x4F3C ZB_ZMASK_DWORD +0x4F40 ZB_ZMASK_RDINDEX +0x4F44 ZB_HIZ_OFFSET +0x4F48 
ZB_HIZ_WRINDEX +0x4F4C ZB_HIZ_DWORD +0x4F50 ZB_HIZ_RDINDEX +0x4F54 ZB_HIZ_PITCH +0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 8e3c0b807ad..6801b865d1c 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 @@ -153,7 +153,7 @@ rs600 0x6d40 0x42A4 SU_POLY_OFFSET_FRONT_SCALE 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET 0x42AC SU_POLY_OFFSET_BACK_SCALE -0x42B0 SU_POLY_OFFSET_BACK_OFFSET +0x42B0 SU_POLY_OFFSET_BACK_OFFSET 0x42B4 SU_POLY_OFFSET_ENABLE 0x42B8 SU_CULL_MODE 0x42C0 SU_DEPTH_SCALE @@ -291,6 +291,8 @@ rs600 0x6d40 0x46AC US_OUT_FMT_2 0x46B0 US_OUT_FMT_3 0x46B4 US_W_FMT +0x46B8 US_CODE_BANK +0x46BC US_CODE_EXT 0x46C0 US_ALU_RGB_ADDR_0 0x46C4 US_ALU_RGB_ADDR_1 0x46C8 US_ALU_RGB_ADDR_2 @@ -547,6 +549,70 @@ rs600 0x6d40 0x4AB4 US_ALU_ALPHA_INST_61 0x4AB8 US_ALU_ALPHA_INST_62 0x4ABC US_ALU_ALPHA_INST_63 +0x4AC0 US_ALU_EXT_ADDR_0 +0x4AC4 US_ALU_EXT_ADDR_1 +0x4AC8 US_ALU_EXT_ADDR_2 +0x4ACC US_ALU_EXT_ADDR_3 +0x4AD0 US_ALU_EXT_ADDR_4 +0x4AD4 US_ALU_EXT_ADDR_5 +0x4AD8 US_ALU_EXT_ADDR_6 +0x4ADC US_ALU_EXT_ADDR_7 +0x4AE0 US_ALU_EXT_ADDR_8 +0x4AE4 US_ALU_EXT_ADDR_9 +0x4AE8 US_ALU_EXT_ADDR_10 +0x4AEC US_ALU_EXT_ADDR_11 +0x4AF0 US_ALU_EXT_ADDR_12 +0x4AF4 US_ALU_EXT_ADDR_13 +0x4AF8 US_ALU_EXT_ADDR_14 +0x4AFC US_ALU_EXT_ADDR_15 +0x4B00 US_ALU_EXT_ADDR_16 +0x4B04 US_ALU_EXT_ADDR_17 +0x4B08 US_ALU_EXT_ADDR_18 +0x4B0C US_ALU_EXT_ADDR_19 +0x4B10 US_ALU_EXT_ADDR_20 +0x4B14 US_ALU_EXT_ADDR_21 +0x4B18 US_ALU_EXT_ADDR_22 +0x4B1C US_ALU_EXT_ADDR_23 +0x4B20 US_ALU_EXT_ADDR_24 +0x4B24 US_ALU_EXT_ADDR_25 +0x4B28 US_ALU_EXT_ADDR_26 +0x4B2C US_ALU_EXT_ADDR_27 +0x4B30 US_ALU_EXT_ADDR_28 +0x4B34 US_ALU_EXT_ADDR_29 +0x4B38 US_ALU_EXT_ADDR_30 +0x4B3C US_ALU_EXT_ADDR_31 +0x4B40 US_ALU_EXT_ADDR_32 +0x4B44 US_ALU_EXT_ADDR_33 +0x4B48 US_ALU_EXT_ADDR_34 +0x4B4C US_ALU_EXT_ADDR_35 +0x4B50 US_ALU_EXT_ADDR_36 +0x4B54 US_ALU_EXT_ADDR_37 +0x4B58 US_ALU_EXT_ADDR_38 +0x4B5C US_ALU_EXT_ADDR_39 +0x4B60 US_ALU_EXT_ADDR_40 +0x4B64 US_ALU_EXT_ADDR_41 +0x4B68 US_ALU_EXT_ADDR_42 +0x4B6C US_ALU_EXT_ADDR_43 +0x4B70 US_ALU_EXT_ADDR_44 +0x4B74 US_ALU_EXT_ADDR_45 +0x4B78 US_ALU_EXT_ADDR_46 +0x4B7C US_ALU_EXT_ADDR_47 +0x4B80 US_ALU_EXT_ADDR_48 +0x4B84 US_ALU_EXT_ADDR_49 +0x4B88 US_ALU_EXT_ADDR_50 +0x4B8C US_ALU_EXT_ADDR_51 +0x4B90 US_ALU_EXT_ADDR_52 +0x4B94 US_ALU_EXT_ADDR_53 +0x4B98 US_ALU_EXT_ADDR_54 +0x4B9C US_ALU_EXT_ADDR_55 +0x4BA0 US_ALU_EXT_ADDR_56 +0x4BA4 US_ALU_EXT_ADDR_57 +0x4BA8 US_ALU_EXT_ADDR_58 +0x4BAC US_ALU_EXT_ADDR_59 +0x4BB0 US_ALU_EXT_ADDR_60 +0x4BB4 US_ALU_EXT_ADDR_61 +0x4BB8 US_ALU_EXT_ADDR_62 +0x4BBC US_ALU_EXT_ADDR_63 0x4BC0 FG_FOG_BLEND 0x4BC4 FG_FOG_FACTOR 0x4BC8 FG_FOG_COLOR_R diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 0102a0d5735..38abf63bf2c 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -161,7 +161,12 @@ rv515 0x6d40 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE +0x4028 GB_Z_PEQ_CONFIG 0x4100 TX_INVALTAGS +0x4114 SU_TEX_WRAP_PS3 +0x4118 PS3_ENABLE +0x411c PS3_VTX_FMT +0x4120 PS3_TEX_SOURCE 0x4200 GA_POINT_S0 0x4204 GA_POINT_T0 0x4208 GA_POINT_S1 @@ -171,6 +176,7 @@ rv515 0x6d40 0x4230 GA_POINT_MINMAX 0x4234 GA_LINE_CNTL 0x4238 GA_LINE_STIPPLE_CONFIG +0x4258 GA_COLOR_CONTROL_PS3 0x4260 GA_LINE_STIPPLE_VALUE 0x4264 GA_LINE_S0 0x4268 GA_LINE_S1 diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ca037160a58..9f5418983e2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ 
b/drivers/gpu/drm/radeon/rs400.c @@ -352,10 +352,11 @@ static int rs400_mc_init(struct radeon_device *rdev) u32 tmp; /* Setup GPU memory space */ - tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); + tmp = RREG32(R_00015C_NB_TOM); rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; rdev->mc.gtt_location = 0xFFFFFFFFUL; r = radeon_mc_setup(rdev); + rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev); if (r) return r; return 0; @@ -387,14 +388,15 @@ static int rs400_startup(struct radeon_device *rdev) r300_clock_startup(rdev); /* Initialize GPU configuration (# pipes, ...) */ rs400_gpu_init(rdev); + r100_enable_bm(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r = rs400_gart_enable(rdev); if (r) return r; /* Enable IRQ */ - rdev->irq.sw_int = true; r100_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -430,6 +432,8 @@ int rs400_resume(struct radeon_device *rdev) radeon_combios_asic_init(rdev->ddev); /* Resume clock after posting */ r300_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return rs400_startup(rdev); } @@ -452,7 +456,7 @@ void rs400_fini(struct radeon_device *rdev) rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -490,12 +494,13 @@ int rs400_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; + /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); + /* Initialize power management */ + radeon_pm_init(rdev); /* Get vram informations */ rs400_vram_info(rdev); /* Initialize memory controller (also test AGP) */ @@ -510,7 +515,7 @@ int rs400_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; r = rs400_gart_init(rdev); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5f117cd8736..d5255751e7b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -45,6 +45,124 @@ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); +int rs600_mc_init(struct radeon_device *rdev) +{ + /* read back the MC value from the hw */ + int r; + u32 tmp; + + /* Setup GPU memory space */ + tmp = RREG32_MC(R_000004_MC_FB_LOCATION); + rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; + rdev->mc.gtt_location = 0xffffffffUL; + r = radeon_mc_setup(rdev); + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + if (r) + return r; + return 0; +} + +/* hpd for digital panel detect/disconnect */ +bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) +{ + u32 tmp; + bool connected = false; + + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS); + if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp)) + connected = true; + break; + case RADEON_HPD_2: + tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS); + if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp)) + connected = true; + break; + default: + break; + } + 
return connected; +} + +void rs600_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd) +{ + u32 tmp; + bool connected = rs600_hpd_sense(rdev, hpd); + + switch (hpd) { + case RADEON_HPD_1: + tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); + if (connected) + tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); + else + tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); + WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); + break; + case RADEON_HPD_2: + tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); + if (connected) + tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); + else + tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); + WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); + break; + default: + break; + } +} + +void rs600_hpd_init(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, + S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); + rdev->irq.hpd[0] = true; + break; + case RADEON_HPD_2: + WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, + S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); + rdev->irq.hpd[1] = true; + break; + default: + break; + } + } + if (rdev->irq.installed) + rs600_irq_set(rdev); +} + +void rs600_hpd_fini(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + switch (radeon_connector->hpd.hpd) { + case RADEON_HPD_1: + WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, + S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); + rdev->irq.hpd[0] = false; + break; + case RADEON_HPD_2: + WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, + S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); + rdev->irq.hpd[1] = false; + break; + default: + break; + } + } +} + /* * GART. 
*/ @@ -100,40 +218,40 @@ int rs600_gart_enable(struct radeon_device *rdev) WREG32(R_00004C_BUS_CNTL, tmp); /* FIXME: setup default page */ WREG32_MC(R_000100_MC_PT0_CNTL, - (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | - S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); + (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | + S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); + for (i = 0; i < 19; i++) { WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, - S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | - S_00016C_SYSTEM_ACCESS_MODE_MASK( - V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | - S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( - V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | - S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | - S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | - S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); + S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | + S_00016C_SYSTEM_ACCESS_MODE_MASK( + V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) | + S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( + V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) | + S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) | + S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | + S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3)); } - - /* System context map to GART space */ - WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); - WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); - /* enable first context */ - WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); - WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, - S_000102_ENABLE_PAGE_TABLE(1) | - S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); + S_000102_ENABLE_PAGE_TABLE(1) | + S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); + /* disable all other contexts */ - for (i = 1; i < 8; i++) { + for (i = 1; i < 8; i++) WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); - } /* setup the page table */ WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, - rdev->gart.table_addr); + rdev->gart.table_addr); + WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); + WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); + /* System context maps to VRAM space */ + WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start); + WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end); + /* enable page tables */ tmp = RREG32_MC(R_000100_MC_PT0_CNTL); WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); @@ -146,15 +264,20 @@ int rs600_gart_enable(struct radeon_device *rdev) void rs600_gart_disable(struct radeon_device *rdev) { - uint32_t tmp; + u32 tmp; + int r; /* FIXME: disable out of gart access */ WREG32_MC(R_000100_MC_PT0_CNTL, 0); tmp = RREG32_MC(R_000009_MC_CNTL1); WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); if (rdev->gart.table.vram.robj) { - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (r == 0) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } } } @@ -189,7 +312,16 @@ int rs600_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; uint32_t mode_int = 0; - + u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) & + ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); + u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & + ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); + + if (!rdev->irq.installed) { + 
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WREG32(R_000040_GEN_INT_CNTL, 0); + return -EINVAL; + } if (rdev->irq.sw_int) { tmp |= S_000040_SW_INT_EN(1); } @@ -199,8 +331,16 @@ int rs600_irq_set(struct radeon_device *rdev) if (rdev->irq.crtc_vblank_int[1]) { mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); } + if (rdev->irq.hpd[0]) { + hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); + } + if (rdev->irq.hpd[1]) { + hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); + } WREG32(R_000040_GEN_INT_CNTL, tmp); WREG32(R_006540_DxMODE_INT_MASK, mode_int); + WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); + WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); return 0; } @@ -208,6 +348,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ { uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); uint32_t irq_mask = ~C_000044_SW_INT; + u32 tmp; if (G_000044_DISPLAY_INT_STAT(irqs)) { *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); @@ -219,6 +360,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ WREG32(R_006D34_D2MODE_VBLANK_STATUS, S_006D34_D2MODE_VBLANK_ACK(1)); } + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { + tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); + tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); + WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); + } + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { + tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); + tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); + WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); + } } else { *r500_disp_int = 0; } @@ -244,6 +395,7 @@ int rs600_irq_process(struct radeon_device *rdev) { uint32_t status, msi_rearm; uint32_t r500_disp_int; + bool queue_hotplug = false; status = rs600_irq_ack(rdev, &r500_disp_int); if (!status && !r500_disp_int) { @@ -251,15 +403,25 @@ int rs600_irq_process(struct radeon_device *rdev) } while (status || r500_disp_int) { /* SW interrupt */ - if (G_000040_SW_INT_EN(status)) + if (G_000044_SW_INT(status)) radeon_fence_process(rdev); /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) drm_handle_vblank(rdev->ddev, 0); if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) drm_handle_vblank(rdev->ddev, 1); + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { + queue_hotplug = true; + DRM_DEBUG("HPD1\n"); + } + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { + queue_hotplug = true; + DRM_DEBUG("HPD2\n"); + } status = rs600_irq_ack(rdev, &r500_disp_int); } + if (queue_hotplug) + queue_work(rdev->wq, &rdev->hotplug_work); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS600: @@ -301,9 +463,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) void rs600_gpu_init(struct radeon_device *rdev) { - /* FIXME: HDP same place on rs600 ? */ r100_hdp_reset(rdev); - /* FIXME: is this correct ? */ r420_pipes_init(rdev); /* Wait for mc idle */ if (rs600_mc_wait_for_idle(rdev)) @@ -312,9 +472,20 @@ void rs600_gpu_init(struct radeon_device *rdev) void rs600_vram_info(struct radeon_device *rdev) { - /* FIXME: to do or is these values sane ? 
*/ rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; + + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; + + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); + rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.mc_vram_size = rdev->mc.aper_size; + + if (rdev->mc.real_vram_size > rdev->mc.aper_size) + rdev->mc.real_vram_size = rdev->mc.aper_size; } void rs600_bandwidth_update(struct radeon_device *rdev) @@ -388,8 +559,8 @@ static int rs600_startup(struct radeon_device *rdev) if (r) return r; /* Enable IRQ */ - rdev->irq.sw_int = true; rs600_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -423,6 +594,8 @@ int rs600_resume(struct radeon_device *rdev) atom_asic_init(rdev->mode_info.atom_context); /* Resume clock after posting */ rv515_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return rs600_startup(rdev); } @@ -445,7 +618,7 @@ void rs600_fini(struct radeon_device *rdev) rs600_gart_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -482,10 +655,9 @@ int rs600_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. posting now...\n"); - atom_asic_init(rdev->mode_info.atom_context); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; + /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* Initialize power management */ @@ -493,7 +665,7 @@ int rs600_init(struct radeon_device *rdev) /* Get vram informations */ rs600_vram_info(rdev); /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); + r = rs600_mc_init(rdev); if (r) return r; rs600_debugfs(rdev); @@ -505,7 +677,7 @@ int rs600_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; r = rs600_gart_init(rdev); diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index 81308924859..c1c8f5885cb 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h @@ -30,27 +30,12 @@ /* Registers */ #define R_000040_GEN_INT_CNTL 0x000040 -#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) -#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) -#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE -#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) -#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) -#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF -#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) -#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) -#define C_000040_CRTC2_VSYNC 0xFFFFFFBF -#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) -#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) -#define C_000040_SNAPSHOT2 0xFFFFFF7F -#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) -#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) -#define C_000040_CRTC2_VBLANK 0xFFFFFDFF -#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) -#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) -#define C_000040_FP2_DETECT 0xFFFFFBFF -#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) -#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) 
(((x) >> 11) & 0x1) -#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF +#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18) +#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1) +#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF +#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19) +#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1) +#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF @@ -370,7 +355,90 @@ #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF - +#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16) +#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1) +#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF +#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17) +#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1) +#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF +#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18) +#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1) +#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF +#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19) +#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1) +#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF +#define R_007828_DACA_AUTODETECT_CONTROL 0x007828 +#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0) +#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3) +#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC +#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8) +#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff) +#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF +#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16) +#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3) +#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF +#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838 +#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0) +#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE +#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16) +#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1) +#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF +#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28 +#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0) +#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3) +#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC +#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8) +#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff) +#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF +#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16) +#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3) +#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF +#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38 +#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0) +#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE +#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16) +#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1) +#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF +#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 
0x007D00 +#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0) +#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1) +#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE +#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04 +#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0) +#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1) +#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE +#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1) +#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1) +#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD +#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08 +#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0) +#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE +#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8) +#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1) +#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF +#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16) +#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1) +#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF +#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10 +#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0) +#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1) +#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE +#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14 +#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0) +#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1) +#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE +#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1) +#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1) +#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD +#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18 +#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0) +#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE +#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8) +#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1) +#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF +#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16) +#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1) +#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF /* MC registers */ #define R_000000_MC_STATUS 0x000000 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 27547175cf9..cd31da91377 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev) void rs690_vram_info(struct radeon_device *rdev) { - uint32_t tmp; fixed20_12 a; rs400_gart_adjust_size(rdev); - /* DDR for all card after R300 & IGP */ + rdev->mc.vram_is_ddr = true; - /* FIXME: is this correct for RS690/RS740 ? 
*/ - tmp = RREG32(RADEON_MEM_CNTL); - if (tmp & R300_MEM_NUM_CHANNELS_MASK) { - rdev->mc.vram_width = 128; - } else { - rdev->mc.vram_width = 64; - } + rdev->mc.vram_width = 128; + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.mc_vram_size = rdev->mc.aper_size; + + if (rdev->mc.real_vram_size > rdev->mc.aper_size) + rdev->mc.real_vram_size = rdev->mc.aper_size; + rs690_pm_info(rdev); /* FIXME: we should enforce default clock in case GPU is not in * default setup @@ -161,6 +162,22 @@ void rs690_vram_info(struct radeon_device *rdev) rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); } +static int rs690_mc_init(struct radeon_device *rdev) +{ + int r; + u32 tmp; + + /* Setup GPU memory space */ + tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION); + rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; + rdev->mc.gtt_location = 0xFFFFFFFFUL; + r = radeon_mc_setup(rdev); + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + if (r) + return r; + return 0; +} + void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode1, struct drm_display_mode *mode2) @@ -244,8 +261,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, b.full = rfixed_const(mode->crtc_hdisplay); c.full = rfixed_const(256); - a.full = rfixed_mul(wm->num_line_pair, b); - request_fifo_depth.full = rfixed_div(a, c); + a.full = rfixed_div(b, c); + request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); + request_fifo_depth.full = rfixed_ceil(request_fifo_depth); if (a.full < rfixed_const(4)) { wm->lb_request_fifo_depth = 4; } else { @@ -374,6 +392,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, a.full = rfixed_const(16); wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); /* Determine estimated width */ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; @@ -383,6 +402,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, } else { a.full = rfixed_const(16); wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = rfixed_ceil(wm->priority_mark); wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; } } @@ -605,8 +625,8 @@ static int rs690_startup(struct radeon_device *rdev) if (r) return r; /* Enable IRQ */ - rdev->irq.sw_int = true; rs600_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -640,6 +660,8 @@ int rs690_resume(struct radeon_device *rdev) atom_asic_init(rdev->mode_info.atom_context); /* Resume clock after posting */ rv515_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return rs690_startup(rdev); } @@ -662,7 +684,7 @@ void rs690_fini(struct radeon_device *rdev) rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -700,10 +722,9 @@ int rs690_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && 
rdev->bios) { - DRM_INFO("GPU not posted. posting now...\n"); - atom_asic_init(rdev->mode_info.atom_context); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; + /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* Initialize power management */ @@ -711,7 +732,7 @@ int rs690_init(struct radeon_device *rdev) /* Get vram informations */ rs690_vram_info(rdev); /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); + r = rs690_mc_init(rdev); if (r) return r; rv515_debugfs(rdev); @@ -723,7 +744,7 @@ int rs690_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; r = rs400_gart_init(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ba68c9fe90a..62756717b04 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -478,8 +478,8 @@ static int rv515_startup(struct radeon_device *rdev) return r; } /* Enable IRQ */ - rdev->irq.sw_int = true; rs600_irq_set(rdev); + rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -514,6 +514,8 @@ int rv515_resume(struct radeon_device *rdev) atom_asic_init(rdev->mode_info.atom_context); /* Resume clock after posting */ rv515_clock_startup(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); return rv515_startup(rdev); } @@ -540,11 +542,11 @@ void rv515_fini(struct radeon_device *rdev) r100_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); - rv370_pcie_gart_fini(rdev); + rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); radeon_irq_kms_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; @@ -580,10 +582,8 @@ int rv515_init(struct radeon_device *rdev) RREG32(R_0007C0_CP_STAT)); } /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - atom_asic_init(rdev->mode_info.atom_context); - } + if (radeon_boot_test_post_card(rdev) == false) + return -EINVAL; /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); /* Initialize power management */ @@ -603,7 +603,7 @@ int rv515_init(struct radeon_device *rdev) if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; r = rv370_pcie_gart_init(rdev); @@ -892,8 +892,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, b.full = rfixed_const(mode->crtc_hdisplay); c.full = rfixed_const(256); - a.full = rfixed_mul(wm->num_line_pair, b); - request_fifo_depth.full = rfixed_div(a, c); + a.full = rfixed_div(b, c); + request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair); + request_fifo_depth.full = rfixed_ceil(request_fifo_depth); if (a.full < rfixed_const(4)) { wm->lb_request_fifo_depth = 4; } else { @@ -995,15 +996,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, a.full = rfixed_const(16); wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max); /* Determine estimated width */ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; estimated_width.full = rfixed_div(estimated_width, consumption_time); if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { - wm->priority_mark.full = rfixed_const(10); + wm->priority_mark.full = wm->priority_mark_max.full; } else { a.full = rfixed_const(16); wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = rfixed_ceil(wm->priority_mark); wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; } } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 5e06ee7076f..afd9e8213c2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) void rv770_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; - int i; + int i, r; /* Disable all tables */ for (i = 0; i < 7; i++) @@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); if (rdev->gart.table.vram.robj) { - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } } } @@ -775,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev) fixed20_12 a; u32 tmp; int chansize, numchan; - int r; /* Get VRAM informations */ rdev->mc.vram_is_ddr = true; @@ -818,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = rdev->mc.aper_size; if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) - return r; /* gtt_size is setup by radeon_agp_init */ rdev->mc.gtt_location = rdev->mc.agp_base; tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; @@ -870,6 +870,14 @@ static int rv770_startup(struct radeon_device *rdev) { int r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + rv770_mc_program(rdev); if (rdev->flags & 
RADEON_IS_AGP) { rv770_agp_enable(rdev); @@ -879,13 +887,27 @@ static int rv770_startup(struct radeon_device *rdev) return r; } rv770_gpu_init(rdev); - - r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); + /* pin copy shader into vram */ + if (rdev->r600_blit.shader_obj) { + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("failed to pin blit object %d\n", r); + return r; + } + } + /* Enable IRQ */ + r = r600_irq_init(rdev); if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); + DRM_ERROR("radeon: IH init failed (%d).\n", r); + radeon_irq_kms_fini(rdev); return r; } + r600_irq_set(rdev); r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) @@ -934,13 +956,22 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { + int r; + /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; + r600_irq_suspend(rdev); r600_wb_disable(rdev); rv770_pcie_gart_disable(rdev); /* unpin shaders bo */ - radeon_object_unpin(rdev->r600_blit.shader_obj); + if (rdev->r600_blit.shader_obj) { + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (likely(r == 0)) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } + } return 0; } @@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; /* Post card if necessary */ - if (!r600_card_posted(rdev) && rdev->bios) { + if (!r600_card_posted(rdev)) { + if (!rdev->bios) { + dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); + return -EINVAL; + } DRM_INFO("GPU not posted. 
posting now...\n"); atom_asic_init(rdev->mode_info.atom_context); } @@ -994,35 +1029,40 @@ int rv770_init(struct radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) + radeon_agp_disable(rdev); + } r = rv770_mc_init(rdev); if (r) return r; /* Memory manager */ - r = radeon_object_init(rdev); + r = radeon_bo_init(rdev); if (r) return r; + + r = radeon_irq_kms_init(rdev); + if (r) + return r; + rdev->cp.ring_obj = NULL; r600_ring_init(rdev, 1024 * 1024); - if (!rdev->me_fw || !rdev->pfp_fw) { - r = r600_cp_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } + rdev->ih.ring_obj = NULL; + r600_ih_ring_init(rdev, 64 * 1024); r = r600_pcie_gart_init(rdev); if (r) return r; - - rdev->accel_working = true; r = r600_blit_init(rdev); if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - rdev->accel_working = false; + r600_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + rdev->accel_working = true; r = rv770_startup(rdev); if (r) { rv770_suspend(rdev); @@ -1034,13 +1074,14 @@ int rv770_init(struct radeon_device *rdev) if (rdev->accel_working) { r = radeon_ib_pool_init(rdev); if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - rdev->accel_working = false; - } - r = r600_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); rdev->accel_working = false; + } else { + r = r600_ib_test(rdev); + if (r) { + dev_err(rdev->dev, "IB test failed (%d).\n", r); + rdev->accel_working = false; + } } } return 0; @@ -1051,15 +1092,16 @@ void rv770_fini(struct radeon_device *rdev) rv770_suspend(rdev); r600_blit_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); radeon_ring_fini(rdev); r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); - if (rdev->flags & RADEON_IS_AGP) - radeon_agp_fini(rdev); - radeon_object_fini(rdev); + radeon_agp_fini(rdev); + radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; |