Diffstat (limited to 'drivers/gpu/drm/radeon')
69 files changed, 2544 insertions, 1080 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index f77b7135ee4..d01b8799142 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -60,7 +60,7 @@ radeon-y := radeon_drv.o # add UMS driver radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \ - radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o + radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o # add KMS driver radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ @@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ - radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \ + radeon_pm.o atombios_dp.o r600_hdmi.o dce3_1_afmt.o \ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ @@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o + ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o # add async DMA block radeon-y += \ diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index ac14b67621d..95d5d4ab333 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -232,8 +232,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) /***** general DP utility functions *****/ -#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 -#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 +#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 +#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], int lane_count, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a7f2ddf09a9..b8cd7975f79 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -291,29 +291,6 @@ static void radeon_atom_backlight_exit(struct radeon_encoder *encoder) bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct drm_display_mode *mode); - -static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - return true; - default: - return false; - } -} - static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff 
--git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index f81d7ca134d..300d971187c 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -1170,23 +1170,6 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = { 25000, 30000, RADEON_SCLK_UP } }; -void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, - u32 *max_clock) -{ - u32 i, clock = 0; - - if ((table == NULL) || (table->count == 0)) { - *max_clock = clock; - return; - } - - for (i = 0; i < table->count; i++) { - if (clock < table->entries[i].clk) - clock = table->entries[i].clk; - } - *max_clock = clock; -} - void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage) { @@ -2099,7 +2082,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_mclk_switching; u32 mclk, sclk; u16 vddc, vddci; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; if ((rdev->pm.dpm.new_active_crtc_count > 1) || btc_dpm_vblank_too_short(rdev)) @@ -2141,39 +2123,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, ps->low.vddci = max_limits->vddci; } - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - if (max_sclk_vddc) { - if (ps->low.sclk > max_sclk_vddc) - ps->low.sclk = max_sclk_vddc; - if (ps->medium.sclk > max_sclk_vddc) - ps->medium.sclk = max_sclk_vddc; - if (ps->high.sclk > max_sclk_vddc) - ps->high.sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->low.mclk > max_mclk_vddci) - ps->low.mclk = max_mclk_vddci; - if (ps->medium.mclk > max_mclk_vddci) - ps->medium.mclk = max_mclk_vddci; - if (ps->high.mclk > max_mclk_vddci) - ps->high.mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->low.mclk > max_mclk_vddc) - ps->low.mclk = max_mclk_vddc; - if (ps->medium.mclk > max_mclk_vddc) - ps->medium.mclk = max_mclk_vddc; - if (ps->high.mclk > max_mclk_vddc) - ps->high.mclk = max_mclk_vddc; - } - /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h index 3b6f12b7760..1a15e0e4195 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.h +++ b/drivers/gpu/drm/radeon/btc_dpm.h @@ -46,8 +46,6 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev, struct rv7xx_pl *pl); void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage); -void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, - u32 *max_clock); void btc_apply_voltage_delta_rules(struct radeon_device *rdev, u16 max_vddc, u16 max_vddci, u16 *vddc, u16 *vddci); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d416bb2ff48..f5c8c0445a9 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -162,8 +162,6 @@ static const struct ci_pt_config_reg didt_config_ci[] = }; extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); -extern void 
btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, - u32 *max_clock); extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, u32 arb_freq_src, u32 arb_freq_dest); extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); @@ -748,7 +746,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_clock_and_voltage_limits *max_limits; bool disable_mclk_switching; u32 sclk, mclk; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if (rps->vce_active) { @@ -784,29 +781,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, } } - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - for (i = 0; i < ps->performance_level_count; i++) { - if (max_sclk_vddc) { - if (ps->performance_levels[i].sclk > max_sclk_vddc) - ps->performance_levels[i].sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->performance_levels[i].mclk > max_mclk_vddci) - ps->performance_levels[i].mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->performance_levels[i].mclk > max_mclk_vddc) - ps->performance_levels[i].mclk = max_mclk_vddc; - } - } - /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { @@ -5293,9 +5267,13 @@ int ci_dpm_init(struct radeon_device *rdev) void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { + struct ci_power_info *pi = ci_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; u32 sclk = ci_get_average_sclk_freq(rdev); u32 mclk = ci_get_average_mclk_freq(rdev); + seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); + seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); seq_printf(m, "power level avg sclk: %u mclk: %u\n", sclk, mclk); } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 3d546c606b4..377afa504d2 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the CP DMA engine (CIK+). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
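This is the first of several hunks that rework the per-ASIC copy callbacks: instead of filling a struct radeon_fence ** out-parameter, cik_copy_cpdma (and the blit/DMA variants further down) now returns the fence directly, or an ERR_PTR() on failure, and takes the reservation object it has to synchronize with. A minimal caller-side sketch follows; the helper name and the way the reservation object is obtained are illustrative assumptions, not code from this patch.

/* Hedged sketch: consuming the reworked copy callback.  The function
 * name and the 'resv' argument source are illustrative assumptions. */
static int example_copy_pages(struct radeon_device *rdev,
			      uint64_t src, uint64_t dst,
			      unsigned num_gpu_pages,
			      struct reservation_object *resv)
{
	struct radeon_fence *fence;
	int r;

	/* the callback syncs to 'resv' itself and returns the new fence */
	fence = rdev->asic->copy.copy(rdev, src, dst, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* ring lock or semaphore setup failed */

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}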
*/ -int cik_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.blit_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes, control; @@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /* @@ -4234,7 +4235,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) WREG32(CP_PFP_UCODE_ADDR, 0); for (i = 0; i < fw_size; i++) WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(CP_PFP_UCODE_ADDR, 0); + WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version)); /* CE */ fw_data = (const __le32 *) @@ -4243,7 +4244,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) WREG32(CP_CE_UCODE_ADDR, 0); for (i = 0; i < fw_size; i++) WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(CP_CE_UCODE_ADDR, 0); + WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version)); /* ME */ fw_data = (const __be32 *) @@ -4252,7 +4253,8 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) WREG32(CP_ME_RAM_WADDR, 0); for (i = 0; i < fw_size; i++) WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); - WREG32(CP_ME_RAM_WADDR, 0); + WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version)); + WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version)); } else { const __be32 *fw_data; @@ -4278,10 +4280,6 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) WREG32(CP_ME_RAM_WADDR, 0); } - WREG32(CP_PFP_UCODE_ADDR, 0); - WREG32(CP_CE_UCODE_ADDR, 0); - WREG32(CP_ME_RAM_WADDR, 0); - WREG32(CP_ME_RAM_RADDR, 0); return 0; } @@ -4563,7 +4561,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev) WREG32(CP_MEC_ME1_UCODE_ADDR, 0); for (i = 0; i < fw_size; i++) WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(CP_MEC_ME1_UCODE_ADDR, 0); + WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version)); /* MEC2 */ if (rdev->family == CHIP_KAVERI) { @@ -4577,7 +4575,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev) WREG32(CP_MEC_ME2_UCODE_ADDR, 0); for (i = 0; i < fw_size; i++) WREG32(CP_MEC_ME2_UCODE_DATA, 
le32_to_cpup(fw_data++)); - WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version)); } } else { const __be32 *fw_data; @@ -4689,7 +4687,7 @@ static int cik_mec_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &rdev->mec.hpd_eop_obj); if (r) { dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -4860,7 +4858,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) sizeof(struct bonaire_mqd), PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, 0, NULL, - &rdev->ring[idx].mqd_obj); + NULL, &rdev->ring[idx].mqd_obj); if (r) { dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); return r; @@ -6226,7 +6224,7 @@ static int cik_rlc_resume(struct radeon_device *rdev) WREG32(RLC_GPM_UCODE_ADDR, 0); for (i = 0; i < size; i++) WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(RLC_GPM_UCODE_ADDR, 0); + WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version)); } else { const __be32 *fw_data; @@ -8255,8 +8253,10 @@ restart_ih: } if (queue_hotplug) schedule_work(&rdev->hotplug_work); - if (queue_reset) - schedule_work(&rdev->reset_work); + if (queue_reset) { + rdev->needs_reset = true; + wake_up_all(&rdev->fence_queue); + } if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index c4ffa54b1e3..c77dad1a457 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -530,18 +530,19 @@ void cik_sdma_fini(struct radeon_device *rdev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (CIK). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int cik_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes; @@ -551,7 +552,7 @@ int cik_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -560,10 +561,10 @@ int cik_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -582,17 +583,17 @@ int cik_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /** diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index 51800e340a5..950af153f30 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -165,7 +165,7 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m /* disable audio prior to setting up hw */ dig->afmt->pin = r600_audio_get_pin(rdev); - r600_audio_enable(rdev, dig->afmt->pin, false); + r600_audio_enable(rdev, dig->afmt->pin, 0); r600_audio_set_dto(encoder, mode->clock); @@ -240,5 +240,5 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m r600_hdmi_audio_workaround(encoder); /* enable audio after to setting up hw */ - r600_audio_enable(rdev, dig->afmt->pin, true); + r600_audio_enable(rdev, dig->afmt->pin, 0xf); } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index ab29f953a76..c0bbf68dbc2 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -284,13 +284,13 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev) void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, - bool enable) + u8 enable_mask) { if (!pin) return; - WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, - enable ? AUDIO_ENABLED : 0); + WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, + enable_mask ? AUDIO_ENABLED : 0); } static const u32 pin_offsets[7] = diff --git a/drivers/gpu/drm/radeon/drm_buffer.c b/drivers/gpu/drm/radeon/drm_buffer.c new file mode 100644 index 00000000000..f4e0f3a3d7b --- /dev/null +++ b/drivers/gpu/drm/radeon/drm_buffer.c @@ -0,0 +1,177 @@ +/************************************************************************** + * + * Copyright 2010 Pauli Nieminen. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Multipart buffer for coping data which is larger than the page size. + * + * Authors: + * Pauli Nieminen <suokkos-at-gmail-dot-com> + */ + +#include <linux/export.h> +#include "drm_buffer.h" + +/** + * Allocate the drm buffer object. + * + * buf: Pointer to a pointer where the object is stored. + * size: The number of bytes to allocate. + */ +int drm_buffer_alloc(struct drm_buffer **buf, int size) +{ + int nr_pages = size / PAGE_SIZE + 1; + int idx; + + /* Allocating pointer table to end of structure makes drm_buffer + * variable sized */ + *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *), + GFP_KERNEL); + + if (*buf == NULL) { + DRM_ERROR("Failed to allocate drm buffer object to hold" + " %d bytes in %d pages.\n", + size, nr_pages); + return -ENOMEM; + } + + (*buf)->size = size; + + for (idx = 0; idx < nr_pages; ++idx) { + + (*buf)->data[idx] = + kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE), + GFP_KERNEL); + + + if ((*buf)->data[idx] == NULL) { + DRM_ERROR("Failed to allocate %dth page for drm" + " buffer with %d bytes and %d pages.\n", + idx + 1, size, nr_pages); + goto error_out; + } + + } + + return 0; + +error_out: + + for (; idx >= 0; --idx) + kfree((*buf)->data[idx]); + + kfree(*buf); + return -ENOMEM; +} + +/** + * Copy the user data to the begin of the buffer and reset the processing + * iterator. + * + * user_data: A pointer the data that is copied to the buffer. + * size: The Number of bytes to copy. 
+ */ +int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size) +{ + int nr_pages = size / PAGE_SIZE + 1; + int idx; + + if (size > buf->size) { + DRM_ERROR("Requesting to copy %d bytes to a drm buffer with" + " %d bytes space\n", + size, buf->size); + return -EFAULT; + } + + for (idx = 0; idx < nr_pages; ++idx) { + + if (copy_from_user(buf->data[idx], + user_data + idx * PAGE_SIZE, + min(PAGE_SIZE, size - idx * PAGE_SIZE))) { + DRM_ERROR("Failed to copy user data (%p) to drm buffer" + " (%p) %dth page.\n", + user_data, buf, idx); + return -EFAULT; + + } + } + buf->iterator = 0; + return 0; +} + +/** + * Free the drm buffer object + */ +void drm_buffer_free(struct drm_buffer *buf) +{ + + if (buf != NULL) { + + int nr_pages = buf->size / PAGE_SIZE + 1; + int idx; + for (idx = 0; idx < nr_pages; ++idx) + kfree(buf->data[idx]); + + kfree(buf); + } +} + +/** + * Read an object from buffer that may be split to multiple parts. If object + * is not split function just returns the pointer to object in buffer. But in + * case of split object data is copied to given stack object that is suplied + * by caller. + * + * The processing location of the buffer is also advanced to the next byte + * after the object. + * + * objsize: The size of the objet in bytes. + * stack_obj: A pointer to a memory location where object can be copied. + */ +void *drm_buffer_read_object(struct drm_buffer *buf, + int objsize, void *stack_obj) +{ + int idx = drm_buffer_index(buf); + int page = drm_buffer_page(buf); + void *obj = NULL; + + if (idx + objsize <= PAGE_SIZE) { + obj = &buf->data[page][idx]; + } else { + /* The object is split which forces copy to temporary object.*/ + int beginsz = PAGE_SIZE - idx; + memcpy(stack_obj, &buf->data[page][idx], beginsz); + + memcpy(stack_obj + beginsz, &buf->data[page + 1][0], + objsize - beginsz); + + obj = stack_obj; + } + + drm_buffer_advance(buf, objsize); + return obj; +} diff --git a/drivers/gpu/drm/radeon/drm_buffer.h b/drivers/gpu/drm/radeon/drm_buffer.h new file mode 100644 index 00000000000..c80d3a340b9 --- /dev/null +++ b/drivers/gpu/drm/radeon/drm_buffer.h @@ -0,0 +1,148 @@ +/************************************************************************** + * + * Copyright 2010 Pauli Nieminen. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + **************************************************************************/ +/* + * Multipart buffer for coping data which is larger than the page size. + * + * Authors: + * Pauli Nieminen <suokkos-at-gmail-dot-com> + */ + +#ifndef _DRM_BUFFER_H_ +#define _DRM_BUFFER_H_ + +#include <drm/drmP.h> + +struct drm_buffer { + int iterator; + int size; + char *data[]; +}; + + +/** + * Return the index of page that buffer is currently pointing at. + */ +static inline int drm_buffer_page(struct drm_buffer *buf) +{ + return buf->iterator / PAGE_SIZE; +} +/** + * Return the index of the current byte in the page + */ +static inline int drm_buffer_index(struct drm_buffer *buf) +{ + return buf->iterator & (PAGE_SIZE - 1); +} +/** + * Return number of bytes that is left to process + */ +static inline int drm_buffer_unprocessed(struct drm_buffer *buf) +{ + return buf->size - buf->iterator; +} + +/** + * Advance the buffer iterator number of bytes that is given. + */ +static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes) +{ + buf->iterator += bytes; +} + +/** + * Allocate the drm buffer object. + * + * buf: A pointer to a pointer where the object is stored. + * size: The number of bytes to allocate. + */ +extern int drm_buffer_alloc(struct drm_buffer **buf, int size); + +/** + * Copy the user data to the begin of the buffer and reset the processing + * iterator. + * + * user_data: A pointer the data that is copied to the buffer. + * size: The Number of bytes to copy. + */ +extern int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size); + +/** + * Free the drm buffer object + */ +extern void drm_buffer_free(struct drm_buffer *buf); + +/** + * Read an object from buffer that may be split to multiple parts. If object + * is not split function just returns the pointer to object in buffer. But in + * case of split object data is copied to given stack object that is suplied + * by caller. + * + * The processing location of the buffer is also advanced to the next byte + * after the object. + * + * objsize: The size of the objet in bytes. + * stack_obj: A pointer to a memory location where object can be copied. + */ +extern void *drm_buffer_read_object(struct drm_buffer *buf, + int objsize, void *stack_obj); + +/** + * Returns the pointer to the dword which is offset number of elements from the + * current processing location. + * + * Caller must make sure that dword is not split in the buffer. This + * requirement is easily met if all the sizes of objects in buffer are + * multiples of dword and PAGE_SIZE is multiple dword. + * + * Call to this function doesn't change the processing location. + * + * offset: The index of the dword relative to the internat iterator. + */ +static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer, + int offset) +{ + int iter = buffer->iterator + offset * 4; + return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; +} +/** + * Returns the pointer to the dword which is offset number of elements from + * the current processing location. + * + * Call to this function doesn't change the processing location. + * + * offset: The index of the byte relative to the internat iterator. 
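drm_buffer.h above carries the complete helper API that the UMS command-stream parsers (such as r300_cmdbuf.c) keep using now that the code moves from the DRM core into the radeon directory. A minimal usage sketch, assuming a caller-supplied user pointer and size rather than any particular ioctl:

/* Hedged usage sketch of the drm_buffer helpers defined above; the
 * user pointer and size stand in for whatever the UMS ioctl passes. */
static int example_parse_cmdbuf(void __user *user_data, int size)
{
	struct drm_buffer *cmdbuf;
	u32 header, stack_copy;
	int r;

	r = drm_buffer_alloc(&cmdbuf, size);	/* allocated in page-sized chunks */
	if (r)
		return r;

	r = drm_buffer_copy_from_user(cmdbuf, user_data, size);
	if (r) {
		drm_buffer_free(cmdbuf);
		return r;
	}

	while (drm_buffer_unprocessed(cmdbuf) >= sizeof(u32)) {
		/* returns a pointer into the buffer, or copies the dword
		 * into stack_copy when it straddles a page boundary */
		header = *(u32 *)drm_buffer_read_object(cmdbuf, sizeof(u32),
							&stack_copy);
		/* ... dispatch on 'header' ... */
	}

	drm_buffer_free(cmdbuf);
	return 0;
}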
+ */ +static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer, + int offset) +{ + int iter = buffer->iterator + offset; + return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; +} + +#endif diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e50807c29f6..a31f1ca40c6 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -22,7 +22,6 @@ * Authors: Alex Deucher */ #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <drm/drmP.h> #include "radeon.h" @@ -4023,7 +4022,7 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.save_restore_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.save_restore_obj); + NULL, &rdev->rlc.save_restore_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); return r; @@ -4102,7 +4101,7 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.clear_state_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.clear_state_obj); + NULL, &rdev->rlc.clear_state_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); sumo_rlc_fini(rdev); @@ -4179,7 +4178,7 @@ int sumo_rlc_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.cp_table_obj); + NULL, &rdev->rlc.cp_table_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); sumo_rlc_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index afaba388c36..66bcfadeedd 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
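Alongside the copy-callback rework, several hunks in this excerpt (cik.c, evergreen.c, r600.c) pass one extra NULL argument to radeon_bo_create() just before the buffer-object pointer. The prototype change itself is not part of this excerpt; the sketch below assumes, in line with the reservation-object theme of the series, that the new parameter is an optional reservation object for the BO.

/* Assumed prototype after this series (not shown in this excerpt);
 * the parameter before 'bo_ptr' is taken to be a reservation object. */
int radeon_bo_create(struct radeon_device *rdev, unsigned long size,
		     int byte_align, bool kernel, u32 domain, u32 flags,
		     struct sg_table *sg, struct reservation_object *resv,
		     struct radeon_bo **bo_ptr);

/* Kernel-internal allocations then read as in the hunks above: */
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
		     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
		     &rdev->rlc.clear_state_obj);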
*/ -int evergreen_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /** diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 278c7a139d7..2514d659b1b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -38,6 +38,37 @@ extern void dce6_afmt_select_pin(struct drm_encoder *encoder); extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode); +/* enable the audio stream */ +static void dce4_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + u8 enable_mask) +{ + u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL); + + if (!pin) + return; + + if (enable_mask) { + tmp |= AUDIO_ENABLED; + if (enable_mask & 1) + tmp |= PIN0_AUDIO_ENABLED; + if (enable_mask & 2) + tmp |= PIN1_AUDIO_ENABLED; + if (enable_mask & 4) + tmp |= PIN2_AUDIO_ENABLED; + if (enable_mask & 8) + tmp |= PIN3_AUDIO_ENABLED; + } else { + tmp &= ~(AUDIO_ENABLED | + PIN0_AUDIO_ENABLED | + PIN1_AUDIO_ENABLED | + PIN2_AUDIO_ENABLED | + PIN3_AUDIO_ENABLED); + } + + WREG32(AZ_HOT_PLUG_CONTROL, tmp); +} + /* * update the N and CTS parameters for a given pixel clock rate */ @@ -318,10 +349,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode /* disable audio prior to setting up hw */ if (ASIC_IS_DCE6(rdev)) { dig->afmt->pin = dce6_audio_get_pin(rdev); - dce6_audio_enable(rdev, dig->afmt->pin, false); + dce6_audio_enable(rdev, dig->afmt->pin, 0); } else { dig->afmt->pin = r600_audio_get_pin(rdev); - r600_audio_enable(rdev, dig->afmt->pin, false); + dce4_audio_enable(rdev, dig->afmt->pin, 0); } evergreen_audio_set_dto(encoder, mode->clock); @@ -463,13 +494,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode /* enable audio after to setting up hw */ if (ASIC_IS_DCE6(rdev)) - dce6_audio_enable(rdev, dig->afmt->pin, true); + dce6_audio_enable(rdev, 
dig->afmt->pin, 1); else - r600_audio_enable(rdev, dig->afmt->pin, true); + dce4_audio_enable(rdev, dig->afmt->pin, 0xf); } void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -482,6 +515,14 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; + if (!enable && dig->afmt->pin) { + if (ASIC_IS_DCE6(rdev)) + dce6_audio_enable(rdev, dig->afmt->pin, 0); + else + dce4_audio_enable(rdev, dig->afmt->pin, 0); + dig->afmt->pin = NULL; + } + dig->afmt->enabled = enable; DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 67cb472d188..1dd976f447f 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2787,6 +2787,8 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> SMU_VOLTAGE_CURRENT_LEVEL_SHIFT; vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp); + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); seq_printf(m, "power level %d sclk: %u vddc: %u\n", current_index, sclk, vddc); } diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 01fc4888e6f..715b181c624 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -789,7 +789,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_mclk_switching; u32 mclk; u16 vddci; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -816,29 +815,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, } } - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - for (i = 0; i < ps->performance_level_count; i++) { - if (max_sclk_vddc) { - if (ps->performance_levels[i].sclk > max_sclk_vddc) - ps->performance_levels[i].sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->performance_levels[i].mclk > max_mclk_vddci) - ps->performance_levels[i].mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->performance_levels[i].mclk > max_mclk_vddc) - ps->performance_levels[i].mclk = max_mclk_vddc; - } - } - /* XXX validate the min clocks required for display */ /* adjust low state */ diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index b0098e792e6..10f8be0ee17 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -869,13 +869,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev, return false; } -int r100_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t 
dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_fence *fence; uint32_t cur_pages; uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; uint32_t pitch; @@ -896,7 +897,7 @@ int r100_copy_blit(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ring, ndw); if (r) { DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); - return -EINVAL; + return ERR_PTR(-EINVAL); } while (num_gpu_pages > 0) { cur_pages = num_gpu_pages; @@ -936,11 +937,13 @@ int r100_copy_blit(struct radeon_device *rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_HOST_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE); - if (fence) { - r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); + r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - return r; + return fence; } static int r100_cp_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 67780374a65..732d4938aab 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -80,13 +80,14 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) return vtx_size; } -int r200_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_fence *fence; uint32_t size; uint32_t cur_size; int i, num_loops; @@ -98,7 +99,7 @@ int r200_copy_dma(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } /* Must wait for 2D idle & clean before DMA or hangs might happen */ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); @@ -118,11 +119,13 @@ int r200_copy_dma(struct radeon_device *rdev, } radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); - if (fence) { - r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); + r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 84b1d5367a1..9418e388b04 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -34,10 +34,10 @@ */ #include <drm/drmP.h> -#include <drm/drm_buffer.h> #include <drm/radeon_drm.h> #include "radeon_drv.h" #include "r300_reg.h" +#include "drm_buffer.h" #include <asm/unaligned.h> diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index ea5c9af722e..56b02927cd3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -122,6 +122,94 @@ u32 r600_get_xclk(struct radeon_device *rdev) int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) { + unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0; + int r; + + /* bypass vclk and dclk with bclk */ + WREG32_P(CG_UPLL_FUNC_CNTL_2, + VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1), + ~(VCLK_SRC_SEL_MASK | 
DCLK_SRC_SEL_MASK)); + + /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */ + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~( + UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK)); + + if (rdev->family >= CHIP_RS780) + WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL, + ~UPLL_BYPASS_CNTL); + + if (!vclk || !dclk) { + /* keep the Bypass mode, put PLL to sleep */ + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + return 0; + } + + if (rdev->clock.spll.reference_freq == 10000) + ref_div = 34; + else + ref_div = 4; + + r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, + ref_div + 1, 0xFFF, 2, 30, ~0, + &fb_div, &vclk_div, &dclk_div); + if (r) + return r; + + if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780) + fb_div >>= 1; + else + fb_div |= 1; + + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); + if (r) + return r; + + /* assert PLL_RESET */ + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK); + + /* For RS780 we have to choose ref clk */ + if (rdev->family >= CHIP_RS780) + WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK, + ~UPLL_REFCLK_SRC_SEL_MASK); + + /* set the required fb, ref and post divder values */ + WREG32_P(CG_UPLL_FUNC_CNTL, + UPLL_FB_DIV(fb_div) | + UPLL_REF_DIV(ref_div), + ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK)); + WREG32_P(CG_UPLL_FUNC_CNTL_2, + UPLL_SW_HILEN(vclk_div >> 1) | + UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) | + UPLL_SW_HILEN2(dclk_div >> 1) | + UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) | + UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK, + ~UPLL_SW_MASK); + + /* give the PLL some time to settle */ + mdelay(15); + + /* deassert PLL_RESET */ + WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK); + + mdelay(15); + + /* deassert BYPASS EN */ + WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); + + if (rdev->family >= CHIP_RS780) + WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL); + + r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); + if (r) + return r; + + /* switch VCLK and DCLK selection */ + WREG32_P(CG_UPLL_FUNC_CNTL_2, + VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2), + ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK)); + + mdelay(100); + return 0; } @@ -992,6 +1080,8 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); @@ -1042,6 +1132,8 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); + WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); radeon_gart_table_vram_unpin(rdev); } @@ -1338,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev) if (rdev->vram_scratch.robj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &rdev->vram_scratch.robj); + 0, NULL, NULL, &rdev->vram_scratch.robj); if (r) { return r; } @@ -2792,12 +2884,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
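The r600.c hunk above fills in r600_set_uvd_clocks(), which previously only returned 0: it parks the UPLL in bypass (and puts it to sleep when both clocks are zero), computes the feedback and post dividers with radeon_uvd_calc_upll_dividers(), waits for the PLL to settle, and only then switches VCLK/DCLK over to the PLL outputs. A hedged usage sketch follows; the wrapper macro is the standard radeon asic dispatch, and the frequencies are purely illustrative, not taken from this excerpt.

/* Illustrative only: 0/0 takes the bypass-and-sleep path handled at
 * the top of r600_set_uvd_clocks() above. */
static void example_uvd_clock_toggle(struct radeon_device *rdev, bool busy)
{
	int r;

	if (busy)
		r = radeon_set_uvd_clocks(rdev, 53300, 40000);
	else
		r = radeon_set_uvd_clocks(rdev, 0, 0);

	if (r)
		DRM_ERROR("setting UVD clocks failed (%d)\n", r);
}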
*/ -int r600_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.blit_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes, tmp; @@ -2807,7 +2900,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -2816,10 +2909,10 @@ int r600_copy_cpdma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); @@ -2846,17 +2939,17 @@ int r600_copy_cpdma(struct radeon_device *rdev, radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } int r600_set_surface_reg(struct radeon_device *rdev, int reg, @@ -2907,6 +3000,18 @@ static int r600_startup(struct radeon_device *rdev) return r; } + if (rdev->has_uvd) { + r = uvd_v1_0_resume(rdev); + if (!r) { + r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); + if (r) { + dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); + } + } + if (r) + rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; + } + /* Enable IRQ */ if (!rdev->irq.installed) { r = radeon_irq_kms_init(rdev); @@ -2935,6 +3040,18 @@ static int r600_startup(struct radeon_device *rdev) if (r) return r; + if (rdev->has_uvd) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + if (ring->ring_size) { + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + RADEON_CP_PACKET2); + if (!r) + r = uvd_v1_0_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); + } + } + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -2994,6 +3111,10 @@ int r600_suspend(struct radeon_device *rdev) radeon_pm_suspend(rdev); r600_audio_fini(rdev); r600_cp_stop(rdev); + if (rdev->has_uvd) { + uvd_v1_0_fini(rdev); + radeon_uvd_suspend(rdev); + } r600_irq_suspend(rdev); radeon_wb_disable(rdev); r600_pcie_gart_disable(rdev); @@ -3073,6 +3194,14 @@ int r600_init(struct radeon_device *rdev) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); + if (rdev->has_uvd) { + r = radeon_uvd_init(rdev); + if (!r) { + rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; + r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); + } + } + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -3102,6 +3231,10 @@ void 
r600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r600_cp_fini(rdev); r600_irq_fini(rdev); + if (rdev->has_uvd) { + uvd_v1_0_fini(rdev); + radeon_uvd_fini(rdev); + } radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); @@ -3235,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->ih.ring_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, 0, - NULL, &rdev->ih.ring_obj); + NULL, NULL, &rdev->ih.ring_obj); if (r) { DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c deleted file mode 100644 index bffac10c429..00000000000 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Christian König. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Christian König - */ -#include <drm/drmP.h> -#include "radeon.h" -#include "radeon_reg.h" -#include "radeon_asic.h" -#include "atom.h" - -/* - * check if enc_priv stores radeon_encoder_atom_dig - */ -static bool radeon_dig_encoder(struct drm_encoder *encoder) -{ - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - case ENCODER_OBJECT_ID_INTERNAL_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - return true; - } - return false; -} - -/* - * check if the chipset is supported - */ -static int r600_audio_chipset_supported(struct radeon_device *rdev) -{ - return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev); -} - -struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) -{ - struct r600_audio_pin status; - uint32_t value; - - value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); - - /* number of channels */ - status.channels = (value & 0x7) + 1; - - /* bits per sample */ - switch ((value & 0xF0) >> 4) { - case 0x0: - status.bits_per_sample = 8; - break; - case 0x1: - status.bits_per_sample = 16; - break; - case 0x2: - status.bits_per_sample = 20; - break; - case 0x3: - status.bits_per_sample = 24; - break; - case 0x4: - status.bits_per_sample = 32; - break; - default: - dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n", - (int)value); - status.bits_per_sample = 16; - } - - /* current sampling rate in HZ */ - if (value & 0x4000) - status.rate = 44100; - else - status.rate = 48000; - status.rate *= ((value >> 11) & 0x7) + 1; - status.rate /= ((value >> 8) & 0x7) + 1; - - value = RREG32(R600_AUDIO_STATUS_BITS); - - /* iec 60958 status bits */ - status.status_bits = value & 0xff; - - /* iec 60958 category code */ - status.category_code = (value >> 8) & 0xff; - - return status; -} - -/* - * update all hdmi interfaces with current audio parameters - */ -void r600_audio_update_hdmi(struct work_struct *work) -{ - struct radeon_device *rdev = container_of(work, struct radeon_device, - audio_work); - struct drm_device *dev = rdev->ddev; - struct r600_audio_pin audio_status = r600_audio_status(rdev); - struct drm_encoder *encoder; - bool changed = false; - - if (rdev->audio.pin[0].channels != audio_status.channels || - rdev->audio.pin[0].rate != audio_status.rate || - rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample || - rdev->audio.pin[0].status_bits != audio_status.status_bits || - rdev->audio.pin[0].category_code != audio_status.category_code) { - rdev->audio.pin[0] = audio_status; - changed = true; - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (!radeon_dig_encoder(encoder)) - continue; - if (changed || r600_hdmi_buffer_status_changed(encoder)) - r600_hdmi_update_audio_settings(encoder); - } -} - -/* enable the audio stream */ -void r600_audio_enable(struct radeon_device *rdev, - struct r600_audio_pin *pin, - bool enable) -{ - u32 value = 0; - - if (!pin) - return; - - if (ASIC_IS_DCE4(rdev)) { - if (enable) { - value |= 0x81000000; /* Required to enable audio */ - value |= 0x0e1000f0; /* fglrx sets that too */ - } - WREG32(EVERGREEN_AUDIO_ENABLE, value); - } else { - 
WREG32_P(R600_AUDIO_ENABLE, - enable ? 0x81000000 : 0x0, ~0x81000000); - } -} - -/* - * initialize the audio vars - */ -int r600_audio_init(struct radeon_device *rdev) -{ - if (!radeon_audio || !r600_audio_chipset_supported(rdev)) - return 0; - - rdev->audio.enabled = true; - - rdev->audio.num_pins = 1; - rdev->audio.pin[0].channels = -1; - rdev->audio.pin[0].rate = -1; - rdev->audio.pin[0].bits_per_sample = -1; - rdev->audio.pin[0].status_bits = 0; - rdev->audio.pin[0].category_code = 0; - rdev->audio.pin[0].id = 0; - /* disable audio. it will be set up later */ - r600_audio_enable(rdev, &rdev->audio.pin[0], false); - - return 0; -} - -/* - * release the audio timer - * TODO: How to do this correctly on SMP systems? - */ -void r600_audio_fini(struct radeon_device *rdev) -{ - if (!rdev->audio.enabled) - return; - - r600_audio_enable(rdev, &rdev->audio.pin[0], false); - - rdev->audio.enabled = false; -} - -struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev) -{ - /* only one pin on 6xx-NI */ - return &rdev->audio.pin[0]; -} diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 8c9b7e26533..09e3f39925f 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -1949,15 +1949,15 @@ int r600_do_cleanup_cp(struct drm_device *dev) #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->cp_ring != NULL) { - drm_core_ioremapfree(dev_priv->cp_ring, dev); + drm_legacy_ioremapfree(dev_priv->cp_ring, dev); dev_priv->cp_ring = NULL; } if (dev_priv->ring_rptr != NULL) { - drm_core_ioremapfree(dev_priv->ring_rptr, dev); + drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } if (dev->agp_buffer_map != NULL) { - drm_core_ioremapfree(dev->agp_buffer_map, dev); + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); dev->agp_buffer_map = NULL; } } else @@ -1968,7 +1968,7 @@ int r600_do_cleanup_cp(struct drm_device *dev) r600_page_table_cleanup(dev, &dev_priv->gart_info); if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { - drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); + drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = NULL; } } @@ -2052,27 +2052,27 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->buffers_offset = init->buffers_offset; dev_priv->gart_textures_offset = init->gart_textures_offset; - master_priv->sarea = drm_getsarea(dev); + master_priv->sarea = drm_legacy_getsarea(dev); if (!master_priv->sarea) { DRM_ERROR("could not find sarea!\n"); r600_do_cleanup_cp(dev); return -EINVAL; } - dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); + dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); if (!dev_priv->cp_ring) { DRM_ERROR("could not find cp ring region!\n"); r600_do_cleanup_cp(dev); return -EINVAL; } - dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); + dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); r600_do_cleanup_cp(dev); return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); r600_do_cleanup_cp(dev); @@ -2081,7 +2081,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, if (init->gart_textures_offset) { 
dev_priv->gart_textures = - drm_core_findmap(dev, init->gart_textures_offset); + drm_legacy_findmap(dev, init->gart_textures_offset); if (!dev_priv->gart_textures) { DRM_ERROR("could not find GART texture region!\n"); r600_do_cleanup_cp(dev); @@ -2092,9 +2092,9 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, #if __OS_HAS_AGP /* XXX */ if (dev_priv->flags & RADEON_IS_AGP) { - drm_core_ioremap_wc(dev_priv->cp_ring, dev); - drm_core_ioremap_wc(dev_priv->ring_rptr, dev); - drm_core_ioremap_wc(dev->agp_buffer_map, dev); + drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); + drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); + drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev->agp_buffer_map->handle) { @@ -2235,7 +2235,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->gart_info.mapping.size = dev_priv->gart_info.table_size; - drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); + drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); if (!dev_priv->gart_info.mapping.handle) { DRM_ERROR("ioremap failed.\n"); r600_do_cleanup_cp(dev); diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index a908daa006d..100189ec5fa 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -427,18 +427,19 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (r6xx). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -448,7 +449,7 @@ int r600_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -457,10 +458,10 @@ int r600_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -477,15 +478,15 @@ int r600_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 26ef8ced6f8..b90dc0eb08e 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -72,6 +72,169 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { /* + * check if the chipset is supported + */ +static int r600_audio_chipset_supported(struct radeon_device *rdev) +{ + return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev); +} + +static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) +{ + struct r600_audio_pin status; + uint32_t value; + + value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); + + /* number of channels */ + status.channels = (value & 0x7) + 1; + + /* bits per sample */ + switch ((value & 0xF0) >> 4) { + case 0x0: + status.bits_per_sample = 8; + break; + case 0x1: + status.bits_per_sample = 16; + break; + case 0x2: + status.bits_per_sample = 20; + break; + case 0x3: + status.bits_per_sample = 24; + break; + case 0x4: + status.bits_per_sample = 32; + break; + default: + dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n", + (int)value); + status.bits_per_sample = 16; + } + + /* current sampling rate in HZ */ + if (value & 0x4000) + status.rate = 44100; + else + status.rate = 48000; + status.rate *= ((value >> 11) & 0x7) + 1; + status.rate /= ((value >> 8) & 0x7) + 1; + + value = RREG32(R600_AUDIO_STATUS_BITS); + + /* iec 60958 status bits */ + status.status_bits = value & 0xff; + + /* iec 60958 category code */ + status.category_code = (value >> 8) & 0xff; + + return status; +} + +/* + * update all hdmi interfaces with current audio parameters + */ +void r600_audio_update_hdmi(struct work_struct *work) +{ + struct radeon_device *rdev = container_of(work, struct radeon_device, + audio_work); + struct drm_device *dev = rdev->ddev; + struct r600_audio_pin audio_status = r600_audio_status(rdev); + struct 
drm_encoder *encoder; + bool changed = false; + + if (rdev->audio.pin[0].channels != audio_status.channels || + rdev->audio.pin[0].rate != audio_status.rate || + rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample || + rdev->audio.pin[0].status_bits != audio_status.status_bits || + rdev->audio.pin[0].category_code != audio_status.category_code) { + rdev->audio.pin[0] = audio_status; + changed = true; + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (!radeon_encoder_is_digital(encoder)) + continue; + if (changed || r600_hdmi_buffer_status_changed(encoder)) + r600_hdmi_update_audio_settings(encoder); + } +} + +/* enable the audio stream */ +void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + u8 enable_mask) +{ + u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL); + + if (!pin) + return; + + if (enable_mask) { + tmp |= AUDIO_ENABLED; + if (enable_mask & 1) + tmp |= PIN0_AUDIO_ENABLED; + if (enable_mask & 2) + tmp |= PIN1_AUDIO_ENABLED; + if (enable_mask & 4) + tmp |= PIN2_AUDIO_ENABLED; + if (enable_mask & 8) + tmp |= PIN3_AUDIO_ENABLED; + } else { + tmp &= ~(AUDIO_ENABLED | + PIN0_AUDIO_ENABLED | + PIN1_AUDIO_ENABLED | + PIN2_AUDIO_ENABLED | + PIN3_AUDIO_ENABLED); + } + + WREG32(AZ_HOT_PLUG_CONTROL, tmp); +} + +/* + * initialize the audio vars + */ +int r600_audio_init(struct radeon_device *rdev) +{ + if (!radeon_audio || !r600_audio_chipset_supported(rdev)) + return 0; + + rdev->audio.enabled = true; + + rdev->audio.num_pins = 1; + rdev->audio.pin[0].channels = -1; + rdev->audio.pin[0].rate = -1; + rdev->audio.pin[0].bits_per_sample = -1; + rdev->audio.pin[0].status_bits = 0; + rdev->audio.pin[0].category_code = 0; + rdev->audio.pin[0].id = 0; + /* disable audio. it will be set up later */ + r600_audio_enable(rdev, &rdev->audio.pin[0], 0); + + return 0; +} + +/* + * release the audio timer + * TODO: How to do this correctly on SMP systems? 
+ */ +void r600_audio_fini(struct radeon_device *rdev) +{ + if (!rdev->audio.enabled) + return; + + r600_audio_enable(rdev, &rdev->audio.pin[0], 0); + + rdev->audio.enabled = false; +} + +struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev) +{ + /* only one pin on 6xx-NI */ + return &rdev->audio.pin[0]; +} + +/* * calculate CTS and N values if they are not found in the table */ static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq) @@ -357,7 +520,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod /* disable audio prior to setting up hw */ dig->afmt->pin = r600_audio_get_pin(rdev); - r600_audio_enable(rdev, dig->afmt->pin, false); + r600_audio_enable(rdev, dig->afmt->pin, 0xf); r600_audio_set_dto(encoder, mode->clock); @@ -443,7 +606,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); /* enable audio after to setting up hw */ - r600_audio_enable(rdev, dig->afmt->pin, true); + r600_audio_enable(rdev, dig->afmt->pin, 0xf); } /** @@ -528,6 +691,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; + if (!enable && dig->afmt->pin) { + r600_audio_enable(rdev, dig->afmt->pin, 0); + dig->afmt->pin = NULL; + } + /* Older chipsets require setting HDMI and routing manually */ if (!ASIC_IS_DCE3(rdev)) { if (enable) diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 31e1052ad3e..1e8495cca41 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -323,11 +323,12 @@ #define HDP_TILING_CONFIG 0x2F3C #define HDP_DEBUG1 0x2F34 +#define MC_CONFIG 0x2000 #define MC_VM_AGP_TOP 0x2184 #define MC_VM_AGP_BOT 0x2188 #define MC_VM_AGP_BASE 0x218C #define MC_VM_FB_LOCATION 0x2180 -#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C +#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124 #define ENABLE_L1_TLB (1 << 0) #define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) #define ENABLE_L1_STRICT_ORDERING (1 << 2) @@ -347,12 +348,14 @@ #define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15) #define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000 #define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15 +#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C #define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0 #define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC #define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204 #define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208 #define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C #define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200 +#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c #define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4 #define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8 #define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210 @@ -366,6 +369,8 @@ #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 +#define RS_DQ_RD_RET_CONF 0x2348 + #define PA_CL_ENHANCE 0x8A14 #define CLIP_VTX_REORDER_ENA (1 << 0) #define NUM_CLIP_SEQ(x) ((x) << 1) @@ -922,6 +927,23 @@ # define TARGET_LINK_SPEED_MASK (0xf << 0) # define SELECTABLE_DEEMPHASIS (1 << 6) +/* Audio */ +#define AZ_HOT_PLUG_CONTROL 0x7300 +# define AZ_FORCE_CODEC_WAKE (1 << 0) +# define JACK_DETECTION_ENABLE (1 << 4) +# define UNSOLICITED_RESPONSE_ENABLE (1 << 8) +# define CODEC_HOT_PLUG_ENABLE (1 << 12) +# define AUDIO_ENABLED (1 << 31) +/* DCE3 adds */ +# define PIN0_JACK_DETECTION_ENABLE (1 << 4) +# define PIN1_JACK_DETECTION_ENABLE (1 << 5) +# define PIN2_JACK_DETECTION_ENABLE (1 << 6) +# define PIN3_JACK_DETECTION_ENABLE (1 << 7) +# define PIN0_AUDIO_ENABLED (1 << 
24) +# define PIN1_AUDIO_ENABLED (1 << 25) +# define PIN2_AUDIO_ENABLED (1 << 26) +# define PIN3_AUDIO_ENABLED (1 << 27) + /* Audio clocks DCE 2.0/3.0 */ #define AUDIO_DTO 0x7340 # define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0) @@ -1476,6 +1498,7 @@ #define UVD_CGC_GATE 0xf4a8 #define UVD_LMI_CTRL2 0xf4f4 #define UVD_MASTINT_EN 0xf500 +#define UVD_FW_START 0xf51C #define UVD_LMI_ADDR_EXT 0xf594 #define UVD_LMI_CTRL 0xf598 #define UVD_LMI_SWAP_CNTL 0xf5b4 @@ -1488,6 +1511,13 @@ #define UVD_MPC_SET_MUX 0xf5f4 #define UVD_MPC_SET_ALU 0xf5f8 +#define UVD_VCPU_CACHE_OFFSET0 0xf608 +#define UVD_VCPU_CACHE_SIZE0 0xf60c +#define UVD_VCPU_CACHE_OFFSET1 0xf610 +#define UVD_VCPU_CACHE_SIZE1 0xf614 +#define UVD_VCPU_CACHE_OFFSET2 0xf618 +#define UVD_VCPU_CACHE_SIZE2 0xf61c + #define UVD_VCPU_CNTL 0xf660 #define UVD_SOFT_RESET 0xf680 #define RBC_SOFT_RESET (1<<0) @@ -1517,9 +1547,35 @@ #define UVD_CONTEXT_ID 0xf6f4 +/* rs780 only */ +#define GFX_MACRO_BYPASS_CNTL 0x30c0 +#define SPLL_BYPASS_CNTL (1 << 0) +#define UPLL_BYPASS_CNTL (1 << 1) + +#define CG_UPLL_FUNC_CNTL 0x7e0 +# define UPLL_RESET_MASK 0x00000001 +# define UPLL_SLEEP_MASK 0x00000002 +# define UPLL_BYPASS_EN_MASK 0x00000004 # define UPLL_CTLREQ_MASK 0x00000008 +# define UPLL_FB_DIV(x) ((x) << 4) +# define UPLL_FB_DIV_MASK 0x0000FFF0 +# define UPLL_REF_DIV(x) ((x) << 16) +# define UPLL_REF_DIV_MASK 0x003F0000 +# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000 # define UPLL_CTLACK_MASK 0x40000000 # define UPLL_CTLACK2_MASK 0x80000000 +#define CG_UPLL_FUNC_CNTL_2 0x7e4 +# define UPLL_SW_HILEN(x) ((x) << 0) +# define UPLL_SW_LOLEN(x) ((x) << 4) +# define UPLL_SW_HILEN2(x) ((x) << 8) +# define UPLL_SW_LOLEN2(x) ((x) << 12) +# define UPLL_DIVEN_MASK 0x00010000 +# define UPLL_DIVEN2_MASK 0x00020000 +# define UPLL_SW_MASK 0x0003FFFF +# define VCLK_SRC_SEL(x) ((x) << 20) +# define VCLK_SRC_SEL_MASK 0x01F00000 +# define DCLK_SRC_SEL(x) ((x) << 25) +# define DCLK_SRC_SEL_MASK 0x3E000000 /* * PM4 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3247bfd1441..f7c4b226a28 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -65,6 +65,8 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/interval_tree.h> +#include <linux/hashtable.h> +#include <linux/fence.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -72,6 +74,8 @@ #include <ttm/ttm_module.h> #include <ttm/ttm_execbuf_util.h> +#include <drm/drm_gem.h> + #include "radeon_family.h" #include "radeon_mode.h" #include "radeon_reg.h" @@ -120,9 +124,6 @@ extern int radeon_backlight; #define RADEONFB_CONN_LIMIT 4 #define RADEON_BIOS_NUM_SCRATCH 8 -/* fence seq are set to this number when signaled */ -#define RADEON_FENCE_SIGNALED_SEQ 0LL - /* internal ring indices */ /* r1xx+ has gfx CP ring */ #define RADEON_RING_TYPE_GFX_INDEX 0 @@ -350,28 +351,32 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, * Fences. */ struct radeon_fence_driver { + struct radeon_device *rdev; uint32_t scratch_reg; uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ uint64_t sync_seq[RADEON_NUM_RINGS]; atomic64_t last_seq; - bool initialized; + bool initialized, delayed_irq; + struct delayed_work lockup_work; }; struct radeon_fence { + struct fence base; + struct radeon_device *rdev; - struct kref kref; - /* protected by radeon_fence.lock */ uint64_t seq; /* RB, DMA, etc. 
*/ unsigned ring; + + wait_queue_t fence_wake; }; int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); int radeon_fence_driver_init(struct radeon_device *rdev); void radeon_fence_driver_fini(struct radeon_device *rdev); -void radeon_fence_driver_force_completion(struct radeon_device *rdev); +void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring); int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); @@ -469,7 +474,7 @@ struct radeon_bo { struct list_head list; /* Protected by tbo.reserved */ u32 initial_domain; - u32 placements[3]; + struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; @@ -489,6 +494,9 @@ struct radeon_bo { struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; + + struct radeon_mn *mn; + struct interval_tree_node mn_it; }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) @@ -580,8 +588,12 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); -void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, - struct radeon_fence *fence); +void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore, + struct radeon_fence *fence); +int radeon_semaphore_sync_resv(struct radeon_device *rdev, + struct radeon_semaphore *semaphore, + struct reservation_object *resv, + bool shared); int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int waiting_ring); @@ -702,7 +714,7 @@ struct radeon_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; - struct radeon_fence *fence; + struct fence *fence; }; struct r500_irq_stat_regs { @@ -780,6 +792,7 @@ struct radeon_irq { int radeon_irq_kms_init(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev); void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); +bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring); void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); @@ -1642,7 +1655,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence); int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence); -void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo); +void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo, + uint32_t allowed_domains); void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp); int radeon_uvd_cs_parse(struct radeon_cs_parser *parser); @@ -1731,6 +1745,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev, struct radeon_ring *cpB); void radeon_test_syncing(struct radeon_device *rdev); +/* + * MMU Notifier + */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); +void radeon_mn_unregister(struct radeon_bo *bo); /* * Debugfs @@ -1845,24 +1864,24 @@ struct radeon_asic { } display; /* copy functions for bo handling */ struct { - int (*blit)(struct radeon_device *rdev, - uint64_t src_offset, - 
uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*blit)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 blit_ring_index; - int (*dma)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*dma)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 dma_ring_index; /* method used for bo copy */ - int (*copy)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*copy)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); /* ring used for bo copies */ u32 copy_ring_index; } copy; @@ -2144,6 +2163,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, @@ -2300,6 +2321,7 @@ struct radeon_device { struct radeon_mman mman; struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; wait_queue_head_t fence_queue; + unsigned fence_context; struct mutex ring_lock; struct radeon_ring ring[RADEON_NUM_RINGS]; bool ib_pool_ready; @@ -2318,7 +2340,7 @@ struct radeon_device { bool need_dma32; bool accel_working; bool fastfb_working; /* IGP feature*/ - bool needs_reset; + bool needs_reset, in_reset; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */ @@ -2339,7 +2361,6 @@ struct radeon_device { struct radeon_mec mec; struct work_struct hotplug_work; struct work_struct audio_work; - struct work_struct reset_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ bool has_uvd; @@ -2376,6 +2397,9 @@ struct radeon_device { /* tracking pinned memory */ u64 vram_pin_size; u64 gart_pin_size; + + struct mutex mn_lock; + DECLARE_HASHTABLE(mn_hash, 7); }; bool radeon_is_px(struct drm_device *dev); @@ -2431,7 +2455,17 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); /* * Cast helper */ -#define to_radeon_fence(p) ((struct radeon_fence *)(p)) +extern const struct fence_ops radeon_fence_ops; + +static inline struct radeon_fence *to_radeon_fence(struct fence *f) +{ + struct radeon_fence *__f = container_of(f, struct radeon_fence, base); + + if (__f->base.ops == &radeon_fence_ops) + return __f; + + return NULL; +} /* * Registers read & write functions. @@ -2751,18 +2785,25 @@ void radeon_atombios_fini(struct radeon_device *rdev); /* * RING helpers. */ -#if DRM_DEBUG_CODE == 0 + +/** + * radeon_ring_write - write a value to the ring + * + * @ring: radeon_ring structure holding ring information + * @v: dword (dw) value to write + * + * Write a value to the requested ring buffer (all asics). 
+ */ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) { + if (ring->count_dw <= 0) + DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); + ring->ring[ring->wptr++] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; ring->ring_free_dw--; } -#else -/* With debugging this is just too big to inline */ -void radeon_ring_write(struct radeon_ring *ring, uint32_t v); -#endif /* * ASICs macro. @@ -2801,9 +2842,9 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) -#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) -#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) -#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) +#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv)) +#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv)) +#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv)) #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index @@ -2877,6 +2918,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); +extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags); +extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); +extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); @@ -2934,10 +2979,10 @@ struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); void r600_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, - bool enable); + u8 enable_mask); void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, - bool enable); + u8 enable_mask); /* * R600 vram scratch functions diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 2dd5847f9b9..850de57069b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -963,6 +963,19 @@ static struct radeon_asic r600_asic = { }, }; +static struct radeon_asic_ring rv6xx_uvd_ring = { + .ib_execute = &uvd_v1_0_ib_execute, + .emit_fence = &uvd_v1_0_fence_emit, + .emit_semaphore = &uvd_v1_0_semaphore_emit, + .cs_parse = &radeon_uvd_cs_parse, + .ring_test = &uvd_v1_0_ring_test, + .ib_test = &uvd_v1_0_ib_test, + .is_lockup = &radeon_ring_test_lockup, + .get_rptr = &uvd_v1_0_get_rptr, + .get_wptr = 
&uvd_v1_0_get_wptr, + .set_wptr = &uvd_v1_0_set_wptr, +}; + static struct radeon_asic rv6xx_asic = { .init = &r600_init, .fini = &r600_fini, @@ -982,6 +995,7 @@ static struct radeon_asic rv6xx_asic = { .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring, }, .irq = { .set = &r600_irq_set, @@ -1072,6 +1086,7 @@ static struct radeon_asic rs780_asic = { .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, + [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring, }, .irq = { .set = &r600_irq_set, @@ -2296,7 +2311,15 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RS780: case CHIP_RS880: rdev->asic = &rs780_asic; - rdev->has_uvd = true; + /* 760G/780V/880V don't have UVD */ + if ((rdev->pdev->device == 0x9616)|| + (rdev->pdev->device == 0x9611)|| + (rdev->pdev->device == 0x9613)|| + (rdev->pdev->device == 0x9711)|| + (rdev->pdev->device == 0x9713)) + rdev->has_uvd = false; + else + rdev->has_uvd = true; break; case CHIP_RV770: case CHIP_RV730: diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 7756bc1e1cd..d8ace5b28a5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -81,11 +81,11 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev, int r100_cs_parse(struct radeon_cs_parser *p); void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); -int r100_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); @@ -152,11 +152,11 @@ void r100_gfx_set_wptr(struct radeon_device *rdev, /* * r200,rv250,rs300,rv280 */ -extern int r200_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void r200_set_safe_registers(struct radeon_device *rdev); /* @@ -340,12 +340,14 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); -int r600_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence **fence); -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence **fence); +struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); +struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void r600_hpd_init(struct radeon_device *rdev); void 
r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -389,7 +391,6 @@ void r600_disable_interrupts(struct radeon_device *rdev); void r600_rlc_stop(struct radeon_device *rdev); /* r600 audio */ int r600_audio_init(struct radeon_device *rdev); -struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); void r600_audio_fini(struct radeon_device *rdev); void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, @@ -461,10 +462,10 @@ bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc); void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); void r700_cp_stop(struct radeon_device *rdev); void r700_cp_fini(struct radeon_device *rdev); -int rv770_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 rv770_get_xclk(struct radeon_device *rdev); int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int rv770_get_temp(struct radeon_device *rdev); @@ -535,10 +536,10 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int evergreen_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); int evergreen_get_temp(struct radeon_device *rdev); @@ -700,10 +701,10 @@ int si_vm_init(struct radeon_device *rdev); void si_vm_fini(struct radeon_device *rdev); void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *si_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void si_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, @@ -759,14 +760,14 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait); void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int cik_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); -int cik_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); +struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct 
reservation_object *resv); int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); @@ -882,6 +883,7 @@ uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring); void uvd_v1_0_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring); +int uvd_v1_0_resume(struct radeon_device *rdev); int uvd_v1_0_init(struct radeon_device *rdev); void uvd_v1_0_fini(struct radeon_device *rdev); @@ -889,6 +891,8 @@ int uvd_v1_0_start(struct radeon_device *rdev); void uvd_v1_0_stop(struct radeon_device *rdev); int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); +void uvd_v1_0_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence); int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index e74c7e387dd..df69b92ba16 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -458,7 +458,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, return true; } -const int supported_devices_connector_convert[] = { +static const int supported_devices_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_VGA, DRM_MODE_CONNECTOR_DVII, @@ -477,7 +477,7 @@ const int supported_devices_connector_convert[] = { DRM_MODE_CONNECTOR_DisplayPort }; -const uint16_t supported_devices_connector_object_id_convert[] = { +static const uint16_t supported_devices_connector_object_id_convert[] = { CONNECTOR_OBJECT_ID_NONE, CONNECTOR_OBJECT_ID_VGA, CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */ @@ -494,7 +494,7 @@ const uint16_t supported_devices_connector_object_id_convert[] = { CONNECTOR_OBJECT_ID_SVIDEO }; -const int object_connector_convert[] = { +static const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DVII, DRM_MODE_CONNECTOR_DVII, diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 69f5695bdab..9e7f23dd14b 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -45,33 +45,29 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, for (i = 0; i < n; i++) { switch (flag) { case RADEON_BENCHMARK_COPY_DMA: - r = radeon_copy_dma(rdev, saddr, daddr, - size / RADEON_GPU_PAGE_SIZE, - &fence); + fence = radeon_copy_dma(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, + NULL); break; case RADEON_BENCHMARK_COPY_BLIT: - r = radeon_copy_blit(rdev, saddr, daddr, - size / RADEON_GPU_PAGE_SIZE, - &fence); + fence = radeon_copy_blit(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, + NULL); break; default: DRM_ERROR("Unknown copy method\n"); - r = -EINVAL; + return -EINVAL; } - if (r) - goto exit_do_move; + if (IS_ERR(fence)) + return PTR_ERR(fence); + r = radeon_fence_wait(fence, false); - if (r) - goto exit_do_move; radeon_fence_unref(&fence); + if (r) + return r; } end_jiffies = jiffies; - r = jiffies_to_msecs(end_jiffies - start_jiffies); - -exit_do_move: - if (fence) - radeon_fence_unref(&fence); - return r; + return jiffies_to_msecs(end_jiffies - start_jiffies); } @@ -97,7 +93,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, 
unsigned size, int time; n = RADEON_BENCHMARK_ITERATIONS; - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj); if (r) { goto out_cleanup; } @@ -109,7 +105,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (r) { goto out_cleanup; } - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 6651177110f..3e5f6b71f3a 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -116,7 +116,7 @@ enum radeon_combios_connector { CONNECTOR_UNSUPPORTED_LEGACY }; -const int legacy_connector_convert[] = { +static const int legacy_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_DVID, DRM_MODE_CONNECTOR_VGA, diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index bb0d5c3a831..ea134a7d51a 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -1298,27 +1298,27 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->buffers_offset = init->buffers_offset; dev_priv->gart_textures_offset = init->gart_textures_offset; - master_priv->sarea = drm_getsarea(dev); + master_priv->sarea = drm_legacy_getsarea(dev); if (!master_priv->sarea) { DRM_ERROR("could not find sarea!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } - dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); + dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); if (!dev_priv->cp_ring) { DRM_ERROR("could not find cp ring region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } - dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); + dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); if (!dev_priv->ring_rptr) { DRM_ERROR("could not find ring read pointer!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); radeon_do_cleanup_cp(dev); @@ -1327,7 +1327,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, if (init->gart_textures_offset) { dev_priv->gart_textures = - drm_core_findmap(dev, init->gart_textures_offset); + drm_legacy_findmap(dev, init->gart_textures_offset); if (!dev_priv->gart_textures) { DRM_ERROR("could not find GART texture region!\n"); radeon_do_cleanup_cp(dev); @@ -1337,9 +1337,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { - drm_core_ioremap_wc(dev_priv->cp_ring, dev); - drm_core_ioremap_wc(dev_priv->ring_rptr, dev); - drm_core_ioremap_wc(dev->agp_buffer_map, dev); + drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); + drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); + drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); if (!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev->agp_buffer_map->handle) { @@ -1475,7 +1475,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->gart_info.mapping.size = 
dev_priv->gart_info.table_size; - drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); + drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = dev_priv->gart_info.mapping.handle; @@ -1569,15 +1569,15 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->cp_ring != NULL) { - drm_core_ioremapfree(dev_priv->cp_ring, dev); + drm_legacy_ioremapfree(dev_priv->cp_ring, dev); dev_priv->cp_ring = NULL; } if (dev_priv->ring_rptr != NULL) { - drm_core_ioremapfree(dev_priv->ring_rptr, dev); + drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } if (dev->agp_buffer_map != NULL) { - drm_core_ioremapfree(dev->agp_buffer_map, dev); + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); dev->agp_buffer_map = NULL; } } else @@ -1597,7 +1597,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { - drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); + drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); dev_priv->gart_info.addr = NULL; } } @@ -2106,9 +2106,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) else dev_priv->flags |= RADEON_IS_PCI; - ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2), - pci_resource_len(dev->pdev, 2), _DRM_REGISTERS, - _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); + ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2), + pci_resource_len(dev->pdev, 2), _DRM_REGISTERS, + _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); if (ret != 0) return ret; @@ -2135,8 +2135,8 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) /* prebuild the SAREA */ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); - ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, - &master_priv->sarea); + ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, + &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); kfree(master_priv); @@ -2162,7 +2162,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) master_priv->sarea_priv = NULL; if (master_priv->sarea) - drm_rmmap_locked(dev, master_priv->sarea); + drm_legacy_rmmap_locked(dev, master_priv->sarea); kfree(master_priv); @@ -2181,9 +2181,9 @@ int radeon_driver_firstopen(struct drm_device *dev) dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0); - ret = drm_addmap(dev, dev_priv->fb_aper_offset, - pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER, - _DRM_WRITE_COMBINING, &map); + ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset, + pci_resource_len(dev->pdev, 0), + _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); if (ret != 0) return ret; @@ -2196,7 +2196,7 @@ int radeon_driver_unload(struct drm_device *dev) DRM_DEBUG("\n"); - drm_rmmap(dev, dev_priv->mmio); + drm_legacy_rmmap(dev, dev_priv->mmio); kfree(dev_priv); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 83f382e8e40..1c893447d7c 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) struct radeon_cs_chunk *chunk; struct radeon_cs_buckets buckets; unsigned i, j; - bool duplicate; + bool duplicate, need_mmap_lock = false; + int r; if (p->chunk_relocs_idx == -1) { return 0; @@ -136,10 +137,13 @@ static int 
radeon_cs_parser_relocs(struct radeon_cs_parser *p) + !!r->write_domain; /* the first reloc of an UVD job is the msg and that must be in - VRAM, also but everything into VRAM on AGP cards to avoid - image corruptions */ + VRAM, also but everything into VRAM on AGP cards and older + IGP chips to avoid image corruptions */ if (p->ring == R600_RING_TYPE_UVD_INDEX && - (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { + (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) || + p->rdev->family == CHIP_RS780 || + p->rdev->family == CHIP_RS880)) { + /* TODO: is this still needed for NI+ ? */ p->relocs[i].prefered_domains = RADEON_GEM_DOMAIN_VRAM; @@ -165,7 +169,21 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; } + if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { + uint32_t domain = p->relocs[i].prefered_domains; + if (!(domain & RADEON_GEM_DOMAIN_GTT)) { + DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " + "allowed for userptr BOs\n"); + return -EINVAL; + } + need_mmap_lock = true; + domain = RADEON_GEM_DOMAIN_GTT; + p->relocs[i].prefered_domains = domain; + p->relocs[i].allowed_domains = domain; + } + p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; + p->relocs[i].tv.shared = !r->write_domain; p->relocs[i].handle = r->handle; radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, @@ -177,8 +195,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (p->cs_flags & RADEON_CS_USE_VM) p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, &p->validated); + if (need_mmap_lock) + down_read(¤t->mm->mmap_sem); - return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); + r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); + + if (need_mmap_lock) + up_read(¤t->mm->mmap_sem); + + return r; } static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) @@ -224,17 +249,24 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority return 0; } -static void radeon_cs_sync_rings(struct radeon_cs_parser *p) +static int radeon_cs_sync_rings(struct radeon_cs_parser *p) { - int i; + int i, r = 0; for (i = 0; i < p->nrelocs; i++) { + struct reservation_object *resv; + if (!p->relocs[i].robj) continue; - radeon_semaphore_sync_to(p->ib.semaphore, - p->relocs[i].robj->tbo.sync_obj); + resv = p->relocs[i].robj->tbo.resv; + r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, + p->relocs[i].tv.shared); + + if (r) + break; } + return r; } /* XXX: note that this is called from the legacy UMS CS ioctl as well */ @@ -403,7 +435,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, - parser->ib.fence); + &parser->ib.fence->base); } else if (backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); @@ -444,13 +476,19 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, return r; } + r = radeon_cs_sync_rings(parser); + if (r) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to sync rings: %i\n", r); + return r; + } + if (parser->ring == R600_RING_TYPE_UVD_INDEX) radeon_uvd_note_usage(rdev); else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) || (parser->ring == TN_RING_TYPE_VCE2_INDEX)) radeon_vce_note_usage(rdev); - radeon_cs_sync_rings(parser); r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); if (r) { DRM_ERROR("Failed to schedule IB !\n"); @@ -537,8 +575,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (r) { goto out; 
} - radeon_cs_sync_rings(parser); - radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); + + r = radeon_cs_sync_rings(parser); + if (r) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to sync rings: %i\n", r); + goto out; + } + radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { @@ -629,6 +673,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) up_read(&rdev->exclusive_lock); return -EBUSY; } + if (rdev->in_reset) { + up_read(&rdev->exclusive_lock); + r = radeon_gpu_reset(rdev); + if (!r) + r = -EAGAIN; + return r; + } /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 12c8329644c..f41cc1538e4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -434,7 +434,7 @@ int radeon_wb_init(struct radeon_device *rdev) if (rdev->wb.wb_obj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &rdev->wb.wb_obj); if (r) { dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); @@ -1257,6 +1257,7 @@ int radeon_device_init(struct radeon_device *rdev, for (i = 0; i < RADEON_NUM_RINGS; i++) { rdev->ring[i].idx = i; } + rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", radeon_family_name[rdev->family], pdev->vendor, pdev->device, @@ -1274,6 +1275,8 @@ int radeon_device_init(struct radeon_device *rdev, init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); + mutex_init(&rdev->mn_lock); + hash_init(rdev->mn_hash); r = radeon_gem_init(rdev); if (r) return r; @@ -1399,10 +1402,6 @@ int radeon_device_init(struct radeon_device *rdev, if (r) goto failed; - r = radeon_ib_ring_tests(rdev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); - r = radeon_gem_debugfs_init(rdev); if (r) { DRM_ERROR("registering gem debugfs failed (%d).\n", r); @@ -1420,6 +1419,10 @@ int radeon_device_init(struct radeon_device *rdev, goto failed; } + r = radeon_ib_ring_tests(rdev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); @@ -1497,7 +1500,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) struct drm_crtc *crtc; struct drm_connector *connector; int i, r; - bool force_completion = false; if (dev == NULL || dev->dev_private == NULL) { return -ENODEV; @@ -1541,12 +1543,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) r = radeon_fence_wait_empty(rdev, i); if (r) { /* delay GPU reset to resume */ - force_completion = true; + radeon_fence_driver_force_completion(rdev, i); } } - if (force_completion) { - radeon_fence_driver_force_completion(rdev); - } radeon_save_bios_scratch_regs(rdev); @@ -1686,8 +1685,6 @@ int radeon_gpu_reset(struct radeon_device *rdev) return 0; } - rdev->needs_reset = false; - radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); @@ -1704,7 +1701,6 @@ int radeon_gpu_reset(struct radeon_device *rdev) } } -retry: r = radeon_asic_reset(rdev); if (!r) { dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); @@ -1713,26 +1709,12 @@ retry: 
radeon_restore_bios_scratch_regs(rdev); - if (!r) { - for (i = 0; i < RADEON_NUM_RINGS; ++i) { + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + if (!r && ring_data[i]) { radeon_ring_restore(rdev, &rdev->ring[i], ring_sizes[i], ring_data[i]); - ring_sizes[i] = 0; - ring_data[i] = NULL; - } - - r = radeon_ib_ring_tests(rdev); - if (r) { - dev_err(rdev->dev, "ib ring test failed (%d).\n", r); - if (saved) { - saved = false; - radeon_suspend(rdev); - goto retry; - } - } - } else { - radeon_fence_driver_force_completion(rdev); - for (i = 0; i < RADEON_NUM_RINGS; ++i) { + } else { + radeon_fence_driver_force_completion(rdev, i); kfree(ring_data[i]); } } @@ -1764,19 +1746,32 @@ retry: /* reset hpd state */ radeon_hpd_init(rdev); + ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); + + rdev->in_reset = true; + rdev->needs_reset = false; + + downgrade_write(&rdev->exclusive_lock); + drm_helper_resume_force_mode(rdev->ddev); /* set the power state here in case we are a PX system or headless */ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); - if (r) { + if (!r) { + r = radeon_ib_ring_tests(rdev); + if (r && saved) + r = -EAGAIN; + } else { /* bad news, how to tell it to userspace ? */ dev_info(rdev->dev, "GPU reset failed\n"); } - up_write(&rdev->exclusive_lock); + rdev->needs_reset = r == -EAGAIN; + rdev->in_reset = false; + + up_read(&rdev->exclusive_lock); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3fdf8731806..00ead8c2758 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -402,12 +402,21 @@ static void radeon_flip_work_func(struct work_struct *__work) down_read(&rdev->exclusive_lock); if (work->fence) { - r = radeon_fence_wait(work->fence, false); - if (r == -EDEADLK) { - up_read(&rdev->exclusive_lock); - r = radeon_gpu_reset(rdev); - down_read(&rdev->exclusive_lock); - } + struct radeon_fence *fence; + + fence = to_radeon_fence(work->fence); + if (fence && fence->rdev == rdev) { + r = radeon_fence_wait(fence, false); + if (r == -EDEADLK) { + up_read(&rdev->exclusive_lock); + do { + r = radeon_gpu_reset(rdev); + } while (r == -EAGAIN); + down_read(&rdev->exclusive_lock); + } + } else + r = fence_wait(work->fence, false); + if (r) DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); @@ -416,7 +425,8 @@ static void radeon_flip_work_func(struct work_struct *__work) * confused about which BO the CRTC is scanning out */ - radeon_fence_unref(&work->fence); + fence_put(work->fence); + work->fence = NULL; } /* We borrow the event spin lock for protecting flip_status */ @@ -474,11 +484,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, obj = new_radeon_fb->obj; new_rbo = gem_to_radeon_bo(obj); - spin_lock(&new_rbo->tbo.bdev->fence_lock); - if (new_rbo->tbo.sync_obj) - work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); - spin_unlock(&new_rbo->tbo.bdev->fence_lock); - /* pin the new buffer */ DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n", work->old_rbo, new_rbo); @@ -497,6 +502,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } + work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); @@ -578,9 +584,8 @@ pflip_cleanup: cleanup: 
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - radeon_fence_unref(&work->fence); + fence_put(work->fence); kfree(work); - return r; } @@ -1917,7 +1922,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl /* In vblank? */ if (in_vbl) - ret |= DRM_SCANOUTPOS_INVBL; + ret |= DRM_SCANOUTPOS_IN_VBLANK; /* Is vpos outside nominal vblank area, but less than * 1/100 of a frame height away from start of vblank? diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f9d17b29b34..dcffa30ee2d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -38,6 +38,8 @@ #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> +#include <drm/drm_gem.h> + #include "drm_crtc_helper.h" /* * KMS wrapper. @@ -114,6 +116,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, @@ -130,7 +135,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, - size_t size, + struct dma_buf_attachment *, struct sg_table *sg); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); @@ -309,7 +314,7 @@ static const struct file_operations radeon_driver_old_fops = { .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, - .mmap = drm_mmap, + .mmap = drm_legacy_mmap, .poll = drm_poll, .read = drm_read, #ifdef CONFIG_COMPAT @@ -329,6 +334,7 @@ static struct drm_driver driver_old = { .preclose = radeon_driver_preclose, .postclose = radeon_driver_postclose, .lastclose = radeon_driver_lastclose, + .set_busid = drm_pci_set_busid, .unload = radeon_driver_unload, .suspend = radeon_suspend, .resume = radeon_resume, @@ -553,6 +559,7 @@ static struct drm_driver kms_driver = { .preclose = radeon_driver_preclose_kms, .postclose = radeon_driver_postclose_kms, .lastclose = radeon_driver_lastclose_kms, + .set_busid = drm_pci_set_busid, .unload = radeon_driver_unload_kms, .get_vblank_counter = radeon_get_vblank_counter_kms, .enable_vblank = radeon_enable_vblank_kms, @@ -578,7 +585,7 @@ static struct drm_driver kms_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, + .gem_prime_export = radeon_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = radeon_gem_prime_pin, .gem_prime_unpin = radeon_gem_prime_unpin, diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index dafd812e457..46bd3938282 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -33,7 +33,9 @@ #include <linux/firmware.h> #include <linux/platform_device.h> +#include <drm/drm_legacy.h> +#include <drm/ati_pcigart.h> #include "radeon_family.h" /* General customization: diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 15edf23b465..9a19e52cc65 100644 --- 
a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -410,3 +410,24 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, } } +bool radeon_encoder_is_digital(struct drm_encoder *encoder) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_LVDS: + case ENCODER_OBJECT_ID_INTERNAL_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: + case ENCODER_OBJECT_ID_INTERNAL_LVTM1: + case ENCODER_OBJECT_ID_INTERNAL_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_DDI: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + return true; + default: + return false; + } +} diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 94b0f2aa3d7..0ea1db83d57 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -189,7 +189,8 @@ out_unref: static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { - struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; + struct radeon_fbdev *rfbdev = + container_of(helper, struct radeon_fbdev, helper); struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 913787085df..99516702528 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -98,6 +98,25 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) } /** + * radeon_fence_schedule_check - schedule lockup check + * + * @rdev: radeon_device pointer + * @ring: ring index we should work with + * + * Queues a delayed work item to check for lockups. + */ +static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring) +{ + /* + * Do not reset the timer here with mod_delayed_work, + * this can livelock in an interaction with TTM delayed destroy. + */ + queue_delayed_work(system_power_efficient_wq, + &rdev->fence_drv[ring].lockup_work, + RADEON_FENCE_JIFFIES_TIMEOUT); +} + +/** * radeon_fence_emit - emit a fence on the requested ring * * @rdev: radeon_device pointer @@ -111,30 +130,70 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring) { + u64 seq = ++rdev->fence_drv[ring].sync_seq[ring]; + /* we are protected by the ring emission mutex */ *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); if ((*fence) == NULL) { return -ENOMEM; } - kref_init(&((*fence)->kref)); (*fence)->rdev = rdev; - (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; + (*fence)->seq = seq; (*fence)->ring = ring; + fence_init(&(*fence)->base, &radeon_fence_ops, + &rdev->fence_queue.lock, rdev->fence_context + ring, seq); radeon_fence_ring_emit(rdev, ring, *fence); trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); + radeon_fence_schedule_check(rdev, ring); return 0; } /** - * radeon_fence_process - process a fence + * radeon_fence_check_signaled - callback from fence_queue + * + * this function is called with fence_queue lock held, which is also used + * for the fence locking itself, so unlocked variants are used for + * fence_signal, and remove_wait_queue. 
+ */ +static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) +{ + struct radeon_fence *fence; + u64 seq; + + fence = container_of(wait, struct radeon_fence, fence_wake); + + /* + * We cannot use radeon_fence_process here because we're already + * in the waitqueue, in a call from wake_up_all. + */ + seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); + if (seq >= fence->seq) { + int ret = fence_signal_locked(&fence->base); + + if (!ret) + FENCE_TRACE(&fence->base, "signaled from irq context\n"); + else + FENCE_TRACE(&fence->base, "was already signaled\n"); + + radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); + __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); + fence_put(&fence->base); + } else + FENCE_TRACE(&fence->base, "pending\n"); + return 0; +} + +/** + * radeon_fence_activity - check for fence activity * * @rdev: radeon_device pointer * @ring: ring index the fence is associated with * - * Checks the current fence value and wakes the fence queue - * if the sequence number has increased (all asics). + * Checks the current fence value and calculates the last + * signalled fence value. Returns true if activity occured + * on the ring, and the fence_queue should be waken up. */ -void radeon_fence_process(struct radeon_device *rdev, int ring) +static bool radeon_fence_activity(struct radeon_device *rdev, int ring) { uint64_t seq, last_seq, last_emitted; unsigned count_loop = 0; @@ -190,23 +249,77 @@ void radeon_fence_process(struct radeon_device *rdev, int ring) } } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); - if (wake) - wake_up_all(&rdev->fence_queue); + if (seq < last_emitted) + radeon_fence_schedule_check(rdev, ring); + + return wake; } /** - * radeon_fence_destroy - destroy a fence + * radeon_fence_check_lockup - check for hardware lockup * - * @kref: fence kref + * @work: delayed work item * - * Frees the fence object (all asics). + * Checks for fence activity and if there is none probe + * the hardware if a lockup occured. 
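radeon_fence_check_signaled() above is a plain wait-queue callback: the fence embeds a wait_queue_t whose .func fires from wake_up_all() with the queue lock already held, which is why only the unlocked __remove_wait_queue() and fence_signal_locked() variants may be used there. A stripped-down sketch of that callback pattern follows; all demo_* names are invented for illustration.

#include <linux/wait.h>
#include <linux/kernel.h>

struct demo_waiter {
	wait_queue_t wait;		/* embedded queue entry */
	wait_queue_head_t *head;	/* queue we are hanging on */
	bool done;
};

/* runs from __wake_up_common() with head->lock already held */
static int demo_wake_cb(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct demo_waiter *w = container_of(wait, struct demo_waiter, wait);

	w->done = true;
	__remove_wait_queue(w->head, &w->wait);	/* unlocked variant only */
	return 0;
}

static void demo_waiter_arm(wait_queue_head_t *head, struct demo_waiter *w)
{
	unsigned long flags;

	w->head = head;
	w->done = false;
	w->wait.flags = 0;
	w->wait.private = NULL;
	w->wait.func = demo_wake_cb;

	spin_lock_irqsave(&head->lock, flags);
	__add_wait_queue(head, &w->wait);
	spin_unlock_irqrestore(&head->lock, flags);
}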
*/ -static void radeon_fence_destroy(struct kref *kref) +static void radeon_fence_check_lockup(struct work_struct *work) { - struct radeon_fence *fence; + struct radeon_fence_driver *fence_drv; + struct radeon_device *rdev; + int ring; + + fence_drv = container_of(work, struct radeon_fence_driver, + lockup_work.work); + rdev = fence_drv->rdev; + ring = fence_drv - &rdev->fence_drv[0]; + + if (!down_read_trylock(&rdev->exclusive_lock)) { + /* just reschedule the check if a reset is going on */ + radeon_fence_schedule_check(rdev, ring); + return; + } + + if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) { + unsigned long irqflags; + + fence_drv->delayed_irq = false; + spin_lock_irqsave(&rdev->irq.lock, irqflags); + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + } + + if (radeon_fence_activity(rdev, ring)) + wake_up_all(&rdev->fence_queue); - fence = container_of(kref, struct radeon_fence, kref); - kfree(fence); + else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { + + /* good news we believe it's a lockup */ + dev_warn(rdev->dev, "GPU lockup (current fence id " + "0x%016llx last fence id 0x%016llx on ring %d)\n", + (uint64_t)atomic64_read(&fence_drv->last_seq), + fence_drv->sync_seq[ring], ring); + + /* remember that we need an reset */ + rdev->needs_reset = true; + wake_up_all(&rdev->fence_queue); + } + up_read(&rdev->exclusive_lock); +} + +/** + * radeon_fence_process - process a fence + * + * @rdev: radeon_device pointer + * @ring: ring index the fence is associated with + * + * Checks the current fence value and wakes the fence queue + * if the sequence number has increased (all asics). + */ +void radeon_fence_process(struct radeon_device *rdev, int ring) +{ + if (radeon_fence_activity(rdev, ring)) + wake_up_all(&rdev->fence_queue); } /** @@ -237,6 +350,75 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, return false; } +static bool radeon_fence_is_signaled(struct fence *f) +{ + struct radeon_fence *fence = to_radeon_fence(f); + struct radeon_device *rdev = fence->rdev; + unsigned ring = fence->ring; + u64 seq = fence->seq; + + if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { + return true; + } + + if (down_read_trylock(&rdev->exclusive_lock)) { + radeon_fence_process(rdev, ring); + up_read(&rdev->exclusive_lock); + + if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { + return true; + } + } + return false; +} + +/** + * radeon_fence_enable_signaling - enable signalling on fence + * @fence: fence + * + * This function is called with fence_queue lock held, and adds a callback + * to fence_queue that checks if this fence is signaled, and if so it + * signals the fence and removes itself. + */ +static bool radeon_fence_enable_signaling(struct fence *f) +{ + struct radeon_fence *fence = to_radeon_fence(f); + struct radeon_device *rdev = fence->rdev; + + if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) + return false; + + if (down_read_trylock(&rdev->exclusive_lock)) { + radeon_irq_kms_sw_irq_get(rdev, fence->ring); + + if (radeon_fence_activity(rdev, fence->ring)) + wake_up_all_locked(&rdev->fence_queue); + + /* did fence get signaled after we enabled the sw irq? 
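radeon_fence_check_lockup() recovers its context purely from the work item: container_of() yields the per-ring fence driver, and pointer arithmetic against the array base yields the ring index, so no back-pointer beyond rdev is needed. The same trick in isolation, with made-up demo_* names:

#include <linux/workqueue.h>
#include <linux/kernel.h>

#define DEMO_NUM_RINGS 8

struct demo_device;

struct demo_ring_state {
	struct delayed_work lockup_work;
	struct demo_device *dev;
};

struct demo_device {
	struct demo_ring_state ring[DEMO_NUM_RINGS];
};

static void demo_check_lockup(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .work member */
	struct demo_ring_state *rs =
		container_of(work, struct demo_ring_state, lockup_work.work);
	struct demo_device *dev = rs->dev;
	int ring = rs - &dev->ring[0];	/* index from array position */

	pr_info("checking ring %d for lockups\n", ring);
	/* probe activity here and re-arm the work if fences are outstanding */
}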
*/ + if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) { + radeon_irq_kms_sw_irq_put(rdev, fence->ring); + up_read(&rdev->exclusive_lock); + return false; + } + + up_read(&rdev->exclusive_lock); + } else { + /* we're probably in a lockup, lets not fiddle too much */ + if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring)) + rdev->fence_drv[fence->ring].delayed_irq = true; + radeon_fence_schedule_check(rdev, fence->ring); + } + + fence->fence_wake.flags = 0; + fence->fence_wake.private = NULL; + fence->fence_wake.func = radeon_fence_check_signaled; + __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); + fence_get(f); + + FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); + return true; +} + /** * radeon_fence_signaled - check if a fence has signaled * @@ -247,14 +429,15 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, */ bool radeon_fence_signaled(struct radeon_fence *fence) { - if (!fence) { + if (!fence) return true; - } - if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { - return true; - } + if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { - fence->seq = RADEON_FENCE_SIGNALED_SEQ; + int ret; + + ret = fence_signal(&fence->base); + if (!ret) + FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); return true; } return false; @@ -283,110 +466,70 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) } /** - * radeon_fence_wait_seq - wait for a specific sequence numbers + * radeon_fence_wait_seq_timeout - wait for a specific sequence numbers * * @rdev: radeon device pointer * @target_seq: sequence number(s) we want to wait for * @intr: use interruptable sleep + * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait * * Wait for the requested sequence number(s) to be written by any ring * (all asics). Sequnce number array is indexed by ring id. * @intr selects whether to use interruptable (true) or non-interruptable * (false) sleep when waiting for the sequence number. Helper function * for radeon_fence_wait_*(). - * Returns 0 if the sequence number has passed, error for all other cases. + * Returns remaining time if the sequence number has passed, 0 when + * the wait timeout, or an error for all other cases. * -EDEADLK is returned when a GPU lockup has been detected. 
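The payoff of implementing enable_signaling and signaled is that radeon fences become ordinary struct fence objects, so any other consumer (TTM, a reservation object, or another driver entirely) can test and wait on them through the generic API without knowing radeon internals. A hedged sketch of such a consumer, for the fence API of this kernel generation; demo_wait_on_foreign_fence() is invented for illustration:

#include <linux/fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_wait_on_foreign_fence(struct fence *f)
{
	long r;

	if (fence_is_signaled(f))
		return 0;

	/* interruptible wait, bounded to one second */
	r = fence_wait_timeout(f, true, HZ);
	if (r == 0)
		return -ETIMEDOUT;

	return r < 0 ? r : 0;
}

fence_wait_timeout() uses the same convention the new radeon_fence_wait_seq_timeout() adopts below: negative on error, zero on timeout, remaining jiffies otherwise.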
*/ -static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, - bool intr) +static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, + u64 *target_seq, bool intr, + long timeout) { - uint64_t last_seq[RADEON_NUM_RINGS]; - bool signaled; - int i, r; - - while (!radeon_fence_any_seq_signaled(rdev, target_seq)) { + long r; + int i; - /* Save current sequence values, used to check for GPU lockups */ - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - if (!target_seq[i]) - continue; + if (radeon_fence_any_seq_signaled(rdev, target_seq)) + return timeout; - last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); - trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); - radeon_irq_kms_sw_irq_get(rdev, i); - } + /* enable IRQs and tracing */ + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + if (!target_seq[i]) + continue; - if (intr) { - r = wait_event_interruptible_timeout(rdev->fence_queue, ( - (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) - || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); - } else { - r = wait_event_timeout(rdev->fence_queue, ( - (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) - || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); - } + trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); + radeon_irq_kms_sw_irq_get(rdev, i); + } - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - if (!target_seq[i]) - continue; + if (intr) { + r = wait_event_interruptible_timeout(rdev->fence_queue, ( + radeon_fence_any_seq_signaled(rdev, target_seq) + || rdev->needs_reset), timeout); + } else { + r = wait_event_timeout(rdev->fence_queue, ( + radeon_fence_any_seq_signaled(rdev, target_seq) + || rdev->needs_reset), timeout); + } - radeon_irq_kms_sw_irq_put(rdev, i); - trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); - } + if (rdev->needs_reset) + r = -EDEADLK; - if (unlikely(r < 0)) - return r; + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + if (!target_seq[i]) + continue; - if (unlikely(!signaled)) { - if (rdev->needs_reset) - return -EDEADLK; - - /* we were interrupted for some reason and fence - * isn't signaled yet, resume waiting */ - if (r) - continue; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - if (!target_seq[i]) - continue; - - if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq)) - break; - } - - if (i != RADEON_NUM_RINGS) - continue; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - if (!target_seq[i]) - continue; - - if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i])) - break; - } - - if (i < RADEON_NUM_RINGS) { - /* good news we believe it's a lockup */ - dev_warn(rdev->dev, "GPU lockup (waiting for " - "0x%016llx last fence id 0x%016llx on" - " ring %d)\n", - target_seq[i], last_seq[i], i); - - /* remember that we need an reset */ - rdev->needs_reset = true; - wake_up_all(&rdev->fence_queue); - return -EDEADLK; - } - } + radeon_irq_kms_sw_irq_put(rdev, i); + trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); } - return 0; + + return r; } /** * radeon_fence_wait - wait for a fence to signal * * @fence: radeon fence object - * @intr: use interruptable sleep + * @intr: use interruptible sleep * * Wait for the requested fence to signal (all asics). 
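The rewritten helper drops the old retry loop and simply forwards the caller's timeout to wait_event_*timeout(), relying on that return-value convention; callers that want the old "wait forever" behaviour pass MAX_SCHEDULE_TIMEOUT. A small standalone illustration, with demo_* names invented:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>

/* returns <0 on error, 0 on timeout, remaining jiffies otherwise */
static long demo_wait(wait_queue_head_t *wq, atomic_t *done,
		      bool intr, long timeout)
{
	long r;

	if (intr)
		r = wait_event_interruptible_timeout(*wq, atomic_read(done),
						     timeout);
	else
		r = wait_event_timeout(*wq, atomic_read(done), timeout);

	return r;
}

/* the "infinite" variant most converted callers in this patch use */
static long demo_wait_forever(wait_queue_head_t *wq, atomic_t *done, bool intr)
{
	return demo_wait(wq, done, intr, MAX_SCHEDULE_TIMEOUT);
}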
* @intr selects whether to use interruptable (true) or non-interruptable @@ -396,22 +539,26 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, int radeon_fence_wait(struct radeon_fence *fence, bool intr) { uint64_t seq[RADEON_NUM_RINGS] = {}; - int r; + long r; - if (fence == NULL) { - WARN(1, "Querying an invalid fence : %p !\n", fence); - return -EINVAL; - } + /* + * This function should not be called on !radeon fences. + * If this is the case, it would mean this function can + * also be called on radeon fences belonging to another card. + * exclusive_lock is not held in that case. + */ + if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) + return fence_wait(&fence->base, intr); seq[fence->ring] = fence->seq; - if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) - return 0; - - r = radeon_fence_wait_seq(fence->rdev, seq, intr); - if (r) + r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); + if (r < 0) { return r; + } - fence->seq = RADEON_FENCE_SIGNALED_SEQ; + r = fence_signal(&fence->base); + if (!r) + FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); return 0; } @@ -434,7 +581,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, { uint64_t seq[RADEON_NUM_RINGS]; unsigned i, num_rings = 0; - int r; + long r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { seq[i] = 0; @@ -445,18 +592,14 @@ int radeon_fence_wait_any(struct radeon_device *rdev, seq[i] = fences[i]->seq; ++num_rings; - - /* test if something was allready signaled */ - if (seq[i] == RADEON_FENCE_SIGNALED_SEQ) - return 0; } /* nothing to wait for ? */ if (num_rings == 0) return -ENOENT; - r = radeon_fence_wait_seq(rdev, seq, intr); - if (r) { + r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); + if (r < 0) { return r; } return 0; @@ -475,6 +618,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, int radeon_fence_wait_next(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; + long r; seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { @@ -482,7 +626,10 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring) already the last emited fence */ return -ENOENT; } - return radeon_fence_wait_seq(rdev, seq, false); + r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; + return 0; } /** @@ -498,18 +645,18 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring) int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; - int r; + long r; seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; if (!seq[ring]) return 0; - r = radeon_fence_wait_seq(rdev, seq, false); - if (r) { + r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) { if (r == -EDEADLK) return -EDEADLK; - dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", + dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n", ring, r); } return 0; @@ -525,7 +672,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) */ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) { - kref_get(&fence->kref); + fence_get(&fence->base); return fence; } @@ -542,7 +689,7 @@ void radeon_fence_unref(struct radeon_fence **fence) *fence = NULL; if (tmp) { - kref_put(&tmp->kref, radeon_fence_destroy); + fence_put(&tmp->base); } } @@ -711,6 +858,9 @@ static void radeon_fence_driver_init_ring(struct radeon_device 
*rdev, int ring) rdev->fence_drv[ring].sync_seq[i] = 0; atomic64_set(&rdev->fence_drv[ring].last_seq, 0); rdev->fence_drv[ring].initialized = false; + INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work, + radeon_fence_check_lockup); + rdev->fence_drv[ring].rdev = rdev; } /** @@ -758,8 +908,9 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) r = radeon_fence_wait_empty(rdev, ring); if (r) { /* no need to trigger GPU reset as we are unloading */ - radeon_fence_driver_force_completion(rdev); + radeon_fence_driver_force_completion(rdev, ring); } + cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); wake_up_all(&rdev->fence_queue); radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); rdev->fence_drv[ring].initialized = false; @@ -771,18 +922,16 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) * radeon_fence_driver_force_completion - force all fence waiter to complete * * @rdev: radeon device pointer + * @ring: the ring to complete * * In case of GPU reset failure make sure no process keep waiting on fence * that will never complete. */ -void radeon_fence_driver_force_completion(struct radeon_device *rdev) +void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) { - int ring; - - for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { - if (!rdev->fence_drv[ring].initialized) - continue; + if (rdev->fence_drv[ring].initialized) { radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); + cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work); } } @@ -833,6 +982,7 @@ static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data) down_read(&rdev->exclusive_lock); seq_printf(m, "%d\n", rdev->needs_reset); rdev->needs_reset = true; + wake_up_all(&rdev->fence_queue); up_read(&rdev->exclusive_lock); return 0; @@ -852,3 +1002,72 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev) return 0; #endif } + +static const char *radeon_fence_get_driver_name(struct fence *fence) +{ + return "radeon"; +} + +static const char *radeon_fence_get_timeline_name(struct fence *f) +{ + struct radeon_fence *fence = to_radeon_fence(f); + switch (fence->ring) { + case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx"; + case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1"; + case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2"; + case R600_RING_TYPE_DMA_INDEX: return "radeon.dma"; + case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1"; + case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd"; + case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1"; + case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2"; + default: WARN_ON_ONCE(1); return "radeon.unk"; + } +} + +static inline bool radeon_test_signaled(struct radeon_fence *fence) +{ + return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); +} + +static signed long radeon_fence_default_wait(struct fence *f, bool intr, + signed long t) +{ + struct radeon_fence *fence = to_radeon_fence(f); + struct radeon_device *rdev = fence->rdev; + bool signaled; + + fence_enable_sw_signaling(&fence->base); + + /* + * This function has to return -EDEADLK, but cannot hold + * exclusive_lock during the wait because some callers + * may already hold it. This means checking needs_reset without + * lock, and not fiddling with any gpu internals. + * + * The callback installed with fence_enable_sw_signaling will + * run before our wait_event_*timeout call, so we will see + * both the signaled fence and the changes to needs_reset. 
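get_driver_name, get_timeline_name and the custom default wait shown here, together with the enable_signaling and signaled callbacks earlier in the file, are what populate radeon_fence_ops at the end of this hunk. For reference, the smallest fence_ops a driver of this era could provide might look like the sketch below; the demo_* names are invented, and radeon itself only overrides .wait so that a pending GPU reset can abort waiters with -EDEADLK.

#include <linux/fence.h>

static const char *demo_get_driver_name(struct fence *f)
{
	return "demo";
}

static const char *demo_get_timeline_name(struct fence *f)
{
	return "demo.ring0";
}

static bool demo_enable_signaling(struct fence *f)
{
	/* enable the interrupt that will eventually call fence_signal() */
	return true;
}

static bool demo_signaled(struct fence *f)
{
	/* compare f->seqno against the last value the hardware wrote back */
	return false;
}

static const struct fence_ops demo_fence_ops = {
	.get_driver_name = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
	.enable_signaling = demo_enable_signaling,
	.signaled = demo_signaled,
	.wait = fence_default_wait,	/* the stock wait is usually enough */
	.release = NULL,
};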
+ */ + + if (intr) + t = wait_event_interruptible_timeout(rdev->fence_queue, + ((signaled = radeon_test_signaled(fence)) || + rdev->needs_reset), t); + else + t = wait_event_timeout(rdev->fence_queue, + ((signaled = radeon_test_signaled(fence)) || + rdev->needs_reset), t); + + if (t > 0 && !signaled) + return -EDEADLK; + return t; +} + +const struct fence_ops radeon_fence_ops = { + .get_driver_name = radeon_fence_get_driver_name, + .get_timeline_name = radeon_fence_get_timeline_name, + .enable_signaling = radeon_fence_enable_signaling, + .signaled = radeon_fence_is_signaled, + .wait = radeon_fence_default_wait, + .release = NULL, +}; diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a053a0779aa..84146d5901a 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) if (rdev->gart.robj == NULL) { r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &rdev->gart.robj); + 0, NULL, NULL, &rdev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bfd7e1b0ff3..c194497aa58 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, retry: r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, - flags, NULL, &robj); + flags, NULL, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { @@ -94,7 +94,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, { struct radeon_bo *robj; uint32_t domain; - int r; + long r; /* FIXME: reeimplement */ robj = gem_to_radeon_bo(gobj); @@ -110,9 +110,12 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = radeon_bo_wait(robj, NULL, false); - if (r) { - printk(KERN_ERR "Failed to wait for object !\n"); + r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); + if (!r) + r = -EBUSY; + + if (r < 0 && r != -EINTR) { + printk(KERN_ERR "Failed to wait for object: %li\n", r); return r; } } @@ -272,6 +275,94 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_radeon_gem_userptr *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *bo; + uint32_t handle; + int r; + + if (offset_in_page(args->addr | args->size)) + return -EINVAL; + + /* reject unknown flag values */ + if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | + RADEON_GEM_USERPTR_REGISTER)) + return -EINVAL; + + if (args->flags & RADEON_GEM_USERPTR_READONLY) { + /* readonly pages not tested on older hardware */ + if (rdev->family < CHIP_R600) + return -EINVAL; + + } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || + !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { + + /* if we want to write to it we must require anonymous + memory and install a MMU notifier */ + return -EACCES; + } + + down_read(&rdev->exclusive_lock); + + /* create a gem object to contain this object in */ + r = radeon_gem_object_create(rdev, args->size, 0, + RADEON_GEM_DOMAIN_CPU, 0, + false, 
&gobj); + if (r) + goto handle_lockup; + + bo = gem_to_radeon_bo(gobj); + r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + if (r) + goto release_object; + + if (args->flags & RADEON_GEM_USERPTR_REGISTER) { + r = radeon_mn_register(bo, args->addr); + if (r) + goto release_object; + } + + if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { + down_read(¤t->mm->mmap_sem); + r = radeon_bo_reserve(bo, true); + if (r) { + up_read(¤t->mm->mmap_sem); + goto release_object; + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + radeon_bo_unreserve(bo); + up_read(¤t->mm->mmap_sem); + if (r) + goto release_object; + } + + r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); + if (r) + goto handle_lockup; + + args->handle = handle; + up_read(&rdev->exclusive_lock); + return 0; + +release_object: + drm_gem_object_unreference_unlocked(gobj); + +handle_lockup: + up_read(&rdev->exclusive_lock); + r = radeon_gem_handle_lockup(rdev, r); + + return r; +} + int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -315,6 +406,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EPERM; + } *offset_p = radeon_bo_mmap_offset(robj); drm_gem_object_unreference_unlocked(gobj); return 0; @@ -357,15 +452,22 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_radeon_gem_wait_idle *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; - int r; + int r = 0; uint32_t cur_placement = 0; + long ret; gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { return -ENOENT; } robj = gem_to_radeon_bo(gobj); - r = radeon_bo_wait(robj, &cur_placement, false); + + ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); + if (ret == 0) + r = -EBUSY; + else if (ret < 0) + r = ret; + /* Flush HDP cache via MMIO if necessary */ if (rdev->asic->mmio_hdp_flush && radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) @@ -532,6 +634,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + + r = -EPERM; + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) + goto out; + r = radeon_bo_reserve(robj, false); if (unlikely(r)) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 5bf2c0a0582..3f39fcca4d0 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -145,7 +145,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, if (ib->vm) { struct radeon_fence *vm_id_fence; vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); - radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); + radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence); } /* sync with other rings */ @@ -269,6 +269,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) r = radeon_ib_test(rdev, i, ring); if (r) { + radeon_fence_driver_force_completion(rdev, i); ring->ready = false; rdev->needs_reset = false; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 16807afab36..7784911d78e 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -88,23 
+88,6 @@ static void radeon_hotplug_work_func(struct work_struct *work) } /** - * radeon_irq_reset_work_func - execute gpu reset - * - * @work: work struct - * - * Execute scheduled gpu reset (cayman+). - * This function is called when the irq handler - * thinks we need a gpu reset. - */ -static void radeon_irq_reset_work_func(struct work_struct *work) -{ - struct radeon_device *rdev = container_of(work, struct radeon_device, - reset_work); - - radeon_gpu_reset(rdev); -} - -/** * radeon_driver_irq_preinstall_kms - drm irq preinstall callback * * @dev: drm dev pointer @@ -284,7 +267,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev) INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); rdev->irq.installed = true; r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); @@ -342,6 +324,21 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) } /** + * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt + * + * @rdev: radeon device pointer + * @ring: ring whose interrupt you want to enable + * + * Enables the software interrupt for a specific ring (all asics). + * The software interrupt is generally used to signal a fence on + * a particular ring. + */ +bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring) +{ + return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1; +} + +/** * radeon_irq_kms_sw_irq_put - disable software interrupt * * @rdev: radeon device pointer diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index eb7164d0798..8309b11e674 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c new file mode 100644 index 00000000000..a69bd441dd2 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -0,0 +1,274 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
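With DRM_RADEON_GEM_USERPTR wired into the ioctl table above, userspace can turn page-aligned anonymous memory into a GEM object. Below is a hedged userspace sketch using libdrm's drmCommandWriteRead(); the struct and flag names are the ones this series adds to the radeon uapi, but check your installed radeon_drm.h, and demo_userptr() itself is invented for illustration.

/* build against libdrm, e.g. cc demo.c $(pkg-config --cflags --libs libdrm) */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/*
 * ptr must be page aligned and size a multiple of the page size; for a
 * writable mapping the kernel side insists on ANONONLY plus REGISTER so
 * that an MMU notifier keeps the GPU in sync with the CPU page tables.
 */
static uint32_t demo_userptr(int fd, void *ptr, uint64_t size)
{
	struct drm_radeon_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)ptr;
	args.size = size;
	args.flags = RADEON_GEM_USERPTR_ANONONLY |
		     RADEON_GEM_USERPTR_REGISTER |
		     RADEON_GEM_USERPTR_VALIDATE;

	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
				&args, sizeof(args)))
		return 0;

	return args.handle;	/* usable in command submission like any BO */
}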
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: + * Christian König <christian.koenig@amd.com> + */ + +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/mmu_notifier.h> +#include <drm/drmP.h> +#include <drm/drm.h> + +#include "radeon.h" + +struct radeon_mn { + /* constant after initialisation */ + struct radeon_device *rdev; + struct mm_struct *mm; + struct mmu_notifier mn; + + /* only used on destruction */ + struct work_struct work; + + /* protected by rdev->mn_lock */ + struct hlist_node node; + + /* objects protected by lock */ + struct mutex lock; + struct rb_root objects; +}; + +/** + * radeon_mn_destroy - destroy the rmn + * + * @work: previously sheduled work item + * + * Lazy destroys the notifier from a work item + */ +static void radeon_mn_destroy(struct work_struct *work) +{ + struct radeon_mn *rmn = container_of(work, struct radeon_mn, work); + struct radeon_device *rdev = rmn->rdev; + struct radeon_bo *bo, *next; + + mutex_lock(&rdev->mn_lock); + mutex_lock(&rmn->lock); + hash_del(&rmn->node); + rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) { + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + } + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); + mmu_notifier_unregister(&rmn->mn, rmn->mm); + kfree(rmn); +} + +/** + * radeon_mn_release - callback to notify about mm destruction + * + * @mn: our notifier + * @mn: the mm this callback is about + * + * Shedule a work item to lazy destroy our notifier. + */ +static void radeon_mn_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + INIT_WORK(&rmn->work, radeon_mn_destroy); + schedule_work(&rmn->work); +} + +/** + * radeon_mn_invalidate_range_start - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @start: start of updated range + * @end: end of updated range + * + * We block for all BOs between start and end to be idle and + * unmap them by move them into system domain again. 
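radeon_mn wraps one mmu_notifier per process address space, and teardown is deferred to a work item so that mmu_notifier_unregister() is never called from the notifier's own release callback. A much-reduced registration sketch follows; the demo_* names are invented, and radeon additionally hashes notifiers per mm and uses __mmu_notifier_register() because it already holds mmap_sem at that point.

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

struct demo_mn {
	struct mmu_notifier mn;
	/* per-mm driver state (locks, interval tree of BOs, ...) */
};

static void demo_mn_invalidate_range_start(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	struct demo_mn *dmn = container_of(mn, struct demo_mn, mn);

	/* wait for the GPU, then unmap anything overlapping [start, end) */
	(void)dmn;
}

static const struct mmu_notifier_ops demo_mn_ops = {
	.invalidate_range_start = demo_mn_invalidate_range_start,
};

/* register for the current process; mmap_sem must not be held here */
static int demo_mn_register(struct demo_mn *dmn)
{
	dmn->mn.ops = &demo_mn_ops;
	return mmu_notifier_register(&dmn->mn, current->mm);
}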
+ */ +static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + struct interval_tree_node *it; + + /* notification is exclusive, but interval is inclusive */ + end -= 1; + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, start, end); + while (it) { + struct radeon_bo *bo; + struct fence *fence; + int r; + + bo = container_of(it, struct radeon_bo, mn_it); + it = interval_tree_iter_next(it, start, end); + + r = radeon_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%d) failed to reserve user bo\n", r); + continue; + } + + fence = reservation_object_get_excl(bo->tbo.resv); + if (fence) { + r = radeon_fence_wait((struct radeon_fence *)fence, false); + if (r) + DRM_ERROR("(%d) failed to wait for user bo\n", r); + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (r) + DRM_ERROR("(%d) failed to validate user bo\n", r); + + radeon_bo_unreserve(bo); + } + + mutex_unlock(&rmn->lock); +} + +static const struct mmu_notifier_ops radeon_mn_ops = { + .release = radeon_mn_release, + .invalidate_range_start = radeon_mn_invalidate_range_start, +}; + +/** + * radeon_mn_get - create notifier context + * + * @rdev: radeon device pointer + * + * Creates a notifier context for current->mm. + */ +static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) +{ + struct mm_struct *mm = current->mm; + struct radeon_mn *rmn; + int r; + + down_write(&mm->mmap_sem); + mutex_lock(&rdev->mn_lock); + + hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) + if (rmn->mm == mm) + goto release_locks; + + rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); + if (!rmn) { + rmn = ERR_PTR(-ENOMEM); + goto release_locks; + } + + rmn->rdev = rdev; + rmn->mm = mm; + rmn->mn.ops = &radeon_mn_ops; + mutex_init(&rmn->lock); + rmn->objects = RB_ROOT; + + r = __mmu_notifier_register(&rmn->mn, mm); + if (r) + goto free_rmn; + + hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); + +release_locks: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + + return rmn; + +free_rmn: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + kfree(rmn); + + return ERR_PTR(r); +} + +/** + * radeon_mn_register - register a BO for notifier updates + * + * @bo: radeon buffer object + * @addr: userptr addr we should monitor + * + * Registers an MMU notifier for the given BO at the specified address. + * Returns 0 on success, -ERRNO if anything goes wrong. + */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) +{ + unsigned long end = addr + radeon_bo_size(bo) - 1; + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + struct interval_tree_node *it; + + rmn = radeon_mn_get(rdev); + if (IS_ERR(rmn)) + return PTR_ERR(rmn); + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, addr, end); + if (it) { + mutex_unlock(&rmn->lock); + return -EEXIST; + } + + bo->mn = rmn; + bo->mn_it.start = addr; + bo->mn_it.last = end; + interval_tree_insert(&bo->mn_it, &rmn->objects); + + mutex_unlock(&rmn->lock); + + return 0; +} + +/** + * radeon_mn_unregister - unregister a BO for notifier updates + * + * @bo: radeon buffer object + * + * Remove any registration of MMU notifier updates from the buffer object. 
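radeon_mn_register() files each BO's user address range in a per-mm interval tree, so invalidate_range_start() only visits objects that actually overlap the invalidated range; the tree works with inclusive end addresses, which is why the callback subtracts one from end. The library calls in isolation, with demo_interval_demo() invented for illustration:

#include <linux/interval_tree.h>
#include <linux/rbtree.h>

static void demo_interval_demo(struct rb_root *root,
			       struct interval_tree_node *node,
			       unsigned long addr, unsigned long size)
{
	struct interval_tree_node *it;
	unsigned long last = addr + size - 1;	/* inclusive end */

	node->start = addr;
	node->last = last;
	interval_tree_insert(node, root);

	/* every 'it' returned here overlaps [addr, last] */
	for (it = interval_tree_iter_first(root, addr, last);
	     it;
	     it = interval_tree_iter_next(it, addr, last))
		;

	interval_tree_remove(node, root);
}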
+ */ +void radeon_mn_unregister(struct radeon_bo *bo) +{ + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + + mutex_lock(&rdev->mn_lock); + rmn = bo->mn; + if (rmn == NULL) { + mutex_unlock(&rdev->mn_lock); + return; + } + + mutex_lock(&rmn->lock); + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); +} diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e27608c29c1..04db2fdd869 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -777,6 +777,7 @@ extern void atombios_digital_setup(struct drm_encoder *encoder, int action); extern int atombios_get_encoder_mode(struct drm_encoder *encoder); extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); +extern bool radeon_encoder_is_digital(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 480c87d8edc..99a960a4f30 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct radeon_bo, tbo); radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); + radeon_mn_unregister(bo); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); @@ -96,55 +97,80 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { u32 c = 0, i; - rbo->placement.fpfn = 0; - rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + if (domain & RADEON_GEM_DOMAIN_GTT) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_TT; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || (rbo->rdev->flags & RADEON_IS_AGP)) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_TT; } } + if (domain & RADEON_GEM_DOMAIN_CPU) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_SYSTEM; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_SYSTEM; } } if (!c) - rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_MASK_CACHING | + TTM_PL_FLAG_SYSTEM; + rbo->placement.num_placement = c; rbo->placement.num_busy_placement = c; + for (i = 
0; i < c; ++i) { + rbo->placements[i].fpfn = 0; + if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && + (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) + rbo->placements[i].lpfn = + rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + else + rbo->placements[i].lpfn = 0; + } + /* * Use two-ended allocation depending on the buffer size to * improve fragmentation quality. * 512kb was measured as the most optimal number. */ - if (rbo->tbo.mem.size > 512 * 1024) { + if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) && + (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) && + rbo->tbo.mem.size > 512 * 1024) { for (i = 0; i < c; i++) { - rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; + rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; } } } int radeon_bo_create(struct radeon_device *rdev, - unsigned long size, int byte_align, bool kernel, u32 domain, - u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr) + unsigned long size, int byte_align, bool kernel, + u32 domain, u32 flags, struct sg_table *sg, + struct reservation_object *resv, + struct radeon_bo **bo_ptr) { struct radeon_bo *bo; enum ttm_bo_type type; @@ -192,7 +218,7 @@ int radeon_bo_create(struct radeon_device *rdev, down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, - acc_size, sg, &radeon_ttm_bo_destroy); + acc_size, sg, resv, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { return r; @@ -264,6 +290,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, { int r, i; + if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return -EPERM; + if (bo->pin_count) { bo->pin_count++; if (gpu_addr) @@ -283,21 +312,19 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, return 0; } radeon_ttm_placement_from_domain(bo, domain); - if (domain == RADEON_GEM_DOMAIN_VRAM) { + for (i = 0; i < bo->placement.num_placement; i++) { /* force to pin into visible video ram */ - bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; - } - if (max_offset) { - u64 lpfn = max_offset >> PAGE_SHIFT; - - if (!bo->placement.lpfn) - bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; + if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && + !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && + (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) + bo->placements[i].lpfn = + bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + else + bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; - if (lpfn < bo->placement.lpfn) - bo->placement.lpfn = lpfn; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { bo->pin_count = 1; @@ -329,8 +356,10 @@ int radeon_bo_unpin(struct radeon_bo *bo) bo->pin_count--; if (bo->pin_count) return 0; - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + for (i = 0; i < bo->placement.num_placement; i++) { + bo->placements[i].lpfn = 0; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) @@ -459,7 +488,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); - r = ttm_eu_reserve_buffers(ticket, head); + r = ttm_eu_reserve_buffers(ticket, head, 
true); if (unlikely(r != 0)) { return r; } @@ -468,6 +497,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, bo = lobj->robj; if (!bo->pin_count) { u32 domain = lobj->prefered_domains; + u32 allowed = lobj->allowed_domains; u32 current_domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type); @@ -479,7 +509,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, * into account. We don't want to disallow buffer moves * completely. */ - if ((lobj->allowed_domains & current_domain) != 0 && + if ((allowed & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -489,7 +519,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, retry: radeon_ttm_placement_from_domain(bo, domain); if (ring == R600_RING_TYPE_UVD_INDEX) - radeon_uvd_force_into_uvd_segment(bo); + radeon_uvd_force_into_uvd_segment(bo, allowed); initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); @@ -731,7 +761,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! */ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; r = ttm_bo_validate(bo, &rbo->placement, false, false); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); @@ -755,12 +785,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) return r; - spin_lock(&bo->tbo.bdev->fence_lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; - if (bo->tbo.sync_obj) - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); - spin_unlock(&bo->tbo.bdev->fence_lock); + + r = ttm_bo_wait(&bo->tbo, true, true, no_wait); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 98a47fdf362..1b8ec791715 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -126,6 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, + struct reservation_object *resv, struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 164898b0010..32522cc940a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1556,7 +1556,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) if (rdev->pm.active_crtcs & (1 << crtc)) { vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); if ((vbl_status & DRM_SCANOUTPOS_VALID) && - !(vbl_status & DRM_SCANOUTPOS_INVBL)) + !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) in_vbl = false; } } diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 0b16f2cbcf1..f3609c97496 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -27,6 +27,7 @@ #include "radeon.h" #include <drm/radeon_drm.h> +#include <linux/dma-buf.h> struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) { @@ -57,15 +58,18 @@ void 
radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) } struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, - size_t size, + struct dma_buf_attachment *attach, struct sg_table *sg) { + struct reservation_object *resv = attach->dmabuf->resv; struct radeon_device *rdev = dev->dev_private; struct radeon_bo *bo; int ret; - ret = radeon_bo_create(rdev, size, PAGE_SIZE, false, - RADEON_GEM_DOMAIN_GTT, 0, sg, &bo); + ww_mutex_lock(&resv->lock, NULL); + ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, + RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); @@ -111,3 +115,13 @@ struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj) return bo->tbo.resv; } + +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags) +{ + struct radeon_bo *bo = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return ERR_PTR(-EPERM); + return drm_gem_prime_export(dev, gobj, flags); +} diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index d6560790253..3d17af34afa 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -45,27 +45,6 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); /** - * radeon_ring_write - write a value to the ring - * - * @ring: radeon_ring structure holding ring information - * @v: dword (dw) value to write - * - * Write a value to the requested ring buffer (all asics). - */ -void radeon_ring_write(struct radeon_ring *ring, uint32_t v) -{ -#if DRM_DEBUG_CODE - if (ring->count_dw <= 0) { - DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); - } -#endif - ring->ring[ring->wptr++] = v; - ring->wptr &= ring->ptr_mask; - ring->count_dw--; - ring->ring_free_dw--; -} - -/** * radeon_ring_supports_scratch_reg - check if the ring supports * writing to scratch registers * @@ -404,7 +383,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig /* Allocate ring buffer */ if (ring->ring_obj == NULL) { r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &ring->ring_obj); if (r) { dev_err(rdev->dev, "(%d) ring create failed\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index b84f97c8718..c507896aca4 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, } r = radeon_bo_create(rdev, size, align, true, - domain, flags, NULL, &sa_manager->bo); + domain, flags, NULL, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index abd6753a570..6deb08f045b 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -96,15 +96,15 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, } /** - * radeon_semaphore_sync_to - use the semaphore to sync to a fence + * radeon_semaphore_sync_fence - use the semaphore to sync to a fence * * @semaphore: semaphore object to add fence to * @fence: fence to sync to * * Sync to the fence using this semaphore object */ -void radeon_semaphore_sync_to(struct radeon_semaphore 
*semaphore, - struct radeon_fence *fence) +void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore, + struct radeon_fence *fence) { struct radeon_fence *other; @@ -116,6 +116,53 @@ void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, } /** + * radeon_semaphore_sync_to - use the semaphore to sync to a reservation object + * + * @sema: semaphore object to add fence from reservation object to + * @resv: reservation object with embedded fence + * @shared: true if we should onyl sync to the exclusive fence + * + * Sync to the fence using this semaphore object + */ +int radeon_semaphore_sync_resv(struct radeon_device *rdev, + struct radeon_semaphore *sema, + struct reservation_object *resv, + bool shared) +{ + struct reservation_object_list *flist; + struct fence *f; + struct radeon_fence *fence; + unsigned i; + int r = 0; + + /* always sync to the exclusive fence */ + f = reservation_object_get_excl(resv); + fence = f ? to_radeon_fence(f) : NULL; + if (fence && fence->rdev == rdev) + radeon_semaphore_sync_fence(sema, fence); + else if (f) + r = fence_wait(f, true); + + flist = reservation_object_get_list(resv); + if (shared || !flist || r) + return r; + + for (i = 0; i < flist->shared_count; ++i) { + f = rcu_dereference_protected(flist->shared[i], + reservation_object_held(resv)); + fence = to_radeon_fence(f); + if (fence && fence->rdev == rdev) + radeon_semaphore_sync_fence(sema, fence); + else + r = fence_wait(f, true); + + if (r) + break; + } + return r; +} + +/** * radeon_semaphore_sync_rings - sync ring to all registered fences * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 23bb64fd775..535403e0c8a 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -30,9 +30,9 @@ */ #include <drm/drmP.h> -#include <drm/drm_buffer.h> #include <drm/radeon_drm.h> #include "radeon_drv.h" +#include "drm_buffer.h" /* ================================================================ * Helper functions for client state checking and fixup diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 17bc3dced9f..07b506b4100 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -67,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) } r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &vram_obj); + 0, NULL, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -87,7 +87,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) struct radeon_fence *fence = NULL; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, + gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; @@ -116,11 +117,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) radeon_bo_kunmap(gtt_obj[i]); if (ring == R600_RING_TYPE_DMA_INDEX) - r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); + fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); else - r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); - if (r) { + fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); + if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy 
%d\n", i); + r = PTR_ERR(fence); goto out_lclean_unpin; } @@ -162,11 +168,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) radeon_bo_kunmap(vram_obj); if (ring == R600_RING_TYPE_DMA_INDEX) - r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); + fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); else - r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); - if (r) { + fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); + if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); + r = PTR_ERR(fence); goto out_lclean_unpin; } @@ -222,7 +233,7 @@ out_lclean: radeon_bo_unreserve(gtt_obj[i]); radeon_bo_unref(>t_obj[i]); } - if (fence) + if (fence && !IS_ERR(fence)) radeon_fence_unref(&fence); break; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 72afe82a95c..8624979afb6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -39,6 +39,8 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/swiotlb.h> +#include <linux/swap.h> +#include <linux/pagemap.h> #include <linux/debugfs.h> #include "radeon_reg.h" #include "radeon.h" @@ -176,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { + static struct ttm_place placements = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + }; + struct radeon_bo *rbo; - static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!radeon_ttm_bo_is_radeon_bo(bo)) { - placement->fpfn = 0; - placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; @@ -228,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; + unsigned num_pages; int r, ridx; rdev = radeon_get_rdev(bo->bdev); @@ -264,13 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); - /* sync other rings */ - fence = bo->sync_obj; - r = radeon_copy(rdev, old_start, new_start, - new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ - &fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, (void *)fence, + num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); + fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); + if (IS_ERR(fence)) + return PTR_ERR(fence); + + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; @@ -284,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; - u32 placements; + struct ttm_place placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = 
ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { @@ -332,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; - u32 placements; + struct ttm_place placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { @@ -483,39 +488,108 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) -{ - return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); -} +/* + * TTM backend functions. + */ +struct radeon_ttm_tt { + struct ttm_dma_tt ttm; + struct radeon_device *rdev; + u64 offset; -static int radeon_sync_obj_flush(void *sync_obj) + uint64_t userptr; + struct mm_struct *usermm; + uint32_t userflags; +}; + +/* prepare the sg table with the user pages */ +static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) { + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + unsigned pinned = 0, nents; + int r; + + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + if (current->mm != gtt->usermm) + return -EPERM; + + if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) { + /* check that we only pin down anonymous memory + to prevent problems with writeback */ + unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; + struct vm_area_struct *vma; + vma = find_vma(gtt->usermm, gtt->userptr); + if (!vma || vma->vm_file || vma->vm_end < end) + return -EPERM; + } + + do { + unsigned num_pages = ttm->num_pages - pinned; + uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; + struct page **pages = ttm->pages + pinned; + + r = get_user_pages(current, current->mm, userptr, num_pages, + write, 0, pages, NULL); + if (r < 0) + goto release_pages; + + pinned += r; + + } while (pinned < ttm->num_pages); + + r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, + ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (r) + goto release_sg; + + r = -ENOMEM; + nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + if (nents != ttm->sg->nents) + goto release_sg; + + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, + gtt->ttm.dma_address, ttm->num_pages); + return 0; -} -static void radeon_sync_obj_unref(void **sync_obj) -{ - radeon_fence_unref((struct radeon_fence **)sync_obj); -} +release_sg: + kfree(ttm->sg); -static void *radeon_sync_obj_ref(void *sync_obj) -{ - return radeon_fence_ref((struct radeon_fence *)sync_obj); +release_pages: + release_pages(ttm->pages, pinned, 0); + return r; } -static bool radeon_sync_obj_signaled(void *sync_obj) +static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { - return radeon_fence_signaled((struct radeon_fence *)sync_obj); -} + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + struct scatterlist 
*sg; + int i; -/* - * TTM backend functions. - */ -struct radeon_ttm_tt { - struct ttm_dma_tt ttm; - struct radeon_device *rdev; - u64 offset; -}; + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + /* free the sg table and pages again */ + dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + + for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { + struct page *page = sg_page(sg); + + if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) + set_page_dirty(page); + + mark_page_accessed(page); + page_cache_release(page); + } + + sg_free_table(ttm->sg); +} static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) @@ -525,6 +599,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, RADEON_GART_PAGE_WRITE; int r; + if (gtt->userptr) { + radeon_ttm_tt_pin_userptr(ttm); + flags &= ~RADEON_GART_PAGE_WRITE; + } + gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", @@ -547,6 +626,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) struct radeon_ttm_tt *gtt = (void *)ttm; radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); + + if (gtt->userptr) + radeon_ttm_tt_unpin_userptr(ttm); + return 0; } @@ -592,10 +675,17 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, return &gtt->ttm.ttm; } +static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) +{ + if (!ttm || ttm->func != &radeon_backend_func) + return NULL; + return (struct radeon_ttm_tt *)ttm; +} + static int radeon_ttm_tt_populate(struct ttm_tt *ttm) { + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -603,6 +693,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) if (ttm->state != tt_unpopulated) return 0; + if (gtt && gtt->userptr) { + ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); + if (!ttm->sg) + return -ENOMEM; + + ttm->page_flags |= TTM_PAGE_FLAG_SG; + ttm->state = tt_unbound; + return 0; + } + if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -648,10 +748,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = (void *)ttm; + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + if (gtt && gtt->userptr) { + kfree(ttm->sg); + ttm->page_flags &= ~TTM_PAGE_FLAG_SG; + return; + } + if (slave) return; @@ -680,6 +786,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } +int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags) +{ + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + + if (gtt == NULL) + return -EINVAL; + + gtt->userptr = addr; + gtt->usermm = current->mm; + gtt->userflags = flags; + return 0; +} + +bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + + if (gtt == NULL) + return false; + + return !!gtt->userptr; +} + +bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + + if (gtt == NULL) + return false; + + return !!(gtt->userflags & 
RADEON_GEM_USERPTR_READONLY); +} + static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, @@ -689,11 +829,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, - .sync_obj_signaled = &radeon_sync_obj_signaled, - .sync_obj_wait = &radeon_sync_obj_wait, - .sync_obj_flush = &radeon_sync_obj_flush, - .sync_obj_unref = &radeon_sync_obj_unref, - .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &radeon_bo_move_notify, .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, @@ -730,7 +865,7 @@ int radeon_ttm_init(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &rdev->stollen_vga_memory); if (r) { return r; @@ -828,7 +963,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) int r; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { - return drm_mmap(filp, vma); + return -EINVAL; } file_priv = filp->private_data; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 341848a1437..11b66246925 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -40,12 +40,18 @@ #define UVD_IDLE_TIMEOUT_MS 1000 /* Firmware Names */ +#define FIRMWARE_R600 "radeon/R600_uvd.bin" +#define FIRMWARE_RS780 "radeon/RS780_uvd.bin" +#define FIRMWARE_RV770 "radeon/RV770_uvd.bin" #define FIRMWARE_RV710 "radeon/RV710_uvd.bin" #define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" #define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" #define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" #define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin" +MODULE_FIRMWARE(FIRMWARE_R600); +MODULE_FIRMWARE(FIRMWARE_RS780); +MODULE_FIRMWARE(FIRMWARE_RV770); MODULE_FIRMWARE(FIRMWARE_RV710); MODULE_FIRMWARE(FIRMWARE_CYPRESS); MODULE_FIRMWARE(FIRMWARE_SUMO); @@ -63,6 +69,23 @@ int radeon_uvd_init(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); switch (rdev->family) { + case CHIP_RV610: + case CHIP_RV630: + case CHIP_RV670: + case CHIP_RV620: + case CHIP_RV635: + fw_name = FIRMWARE_R600; + break; + + case CHIP_RS780: + case CHIP_RS880: + fw_name = FIRMWARE_RS780; + break; + + case CHIP_RV770: + fw_name = FIRMWARE_RV770; + break; + case CHIP_RV710: case CHIP_RV730: case CHIP_RV740: @@ -115,9 +138,11 @@ int radeon_uvd_init(struct radeon_device *rdev) } bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + - RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE + + RADEON_GPU_PAGE_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + NULL, &rdev->uvd.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ -231,10 +256,30 @@ int radeon_uvd_resume(struct radeon_device *rdev) return 0; } -void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo) +void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo, + uint32_t allowed_domains) { - rbo->placement.fpfn = 0 >> PAGE_SHIFT; - rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + int i; + + for (i = 0; i < rbo->placement.num_placement; ++i) { + rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; + rbo->placements[i].lpfn 
= (256 * 1024 * 1024) >> PAGE_SHIFT; + } + + /* If it must be in VRAM it must be in the first segment as well */ + if (allowed_domains == RADEON_GEM_DOMAIN_VRAM) + return; + + /* abort if we already have more than one placement */ + if (rbo->placement.num_placement > 1) + return; + + /* add another 256MB segment */ + rbo->placements[1] = rbo->placements[0]; + rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; + rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; + rbo->placement.num_placement++; + rbo->placement.num_busy_placement++; } void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) @@ -356,6 +401,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; + struct fence *f; void *ptr; int i, r; @@ -365,8 +411,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (bo->tbo.sync_obj) { - r = radeon_fence_wait(bo->tbo.sync_obj, false); + f = reservation_object_get_excl(bo->tbo.resv); + if (f) { + r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); return r; @@ -604,38 +651,16 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) } static int radeon_uvd_send_msg(struct radeon_device *rdev, - int ring, struct radeon_bo *bo, + int ring, uint64_t addr, struct radeon_fence **fence) { - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; - struct list_head head; struct radeon_ib ib; - uint64_t addr; int i, r; - memset(&tv, 0, sizeof(tv)); - tv.bo = &bo->tbo; - - INIT_LIST_HEAD(&head); - list_add(&tv.head, &head); - - r = ttm_eu_reserve_buffers(&ticket, &head); - if (r) - return r; - - radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM); - radeon_uvd_force_into_uvd_segment(bo); - - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - if (r) - goto err; - r = radeon_ib_get(rdev, ring, &ib, NULL, 64); if (r) - goto err; + return r; - addr = radeon_bo_gpu_offset(bo); ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); ib.ptr[1] = addr; ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0); @@ -647,19 +672,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL, false); - if (r) - goto err; - ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); if (fence) *fence = radeon_fence_ref(ib.fence); radeon_ib_free(rdev, &ib); - radeon_bo_unref(&bo); - return 0; - -err: - ttm_eu_backoff_reservation(&ticket, &head); return r; } @@ -669,27 +686,18 @@ err: int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence) { - struct radeon_bo *bo; - uint32_t *msg; - int r, i; + /* we use the last page of the vcpu bo for the UVD message */ + uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - + RADEON_GPU_PAGE_SIZE; - r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); - if (r) - return r; + uint32_t *msg = rdev->uvd.cpu_addr + offs; + uint64_t addr = rdev->uvd.gpu_addr + offs; - r = radeon_bo_reserve(bo, false); - if (r) { - radeon_bo_unref(&bo); - return r; - } + int r, i; - r = radeon_bo_kmap(bo, (void **)&msg); - if (r) { - radeon_bo_unreserve(bo); - radeon_bo_unref(&bo); + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); + if (r) return r; - } /* stitch together an UVD create msg */ msg[0] = cpu_to_le32(0x00000de4); @@ -706,36 +714,26 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, 
int ring, for (i = 11; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - radeon_bo_kunmap(bo); - radeon_bo_unreserve(bo); - - return radeon_uvd_send_msg(rdev, ring, bo, fence); + r = radeon_uvd_send_msg(rdev, ring, addr, fence); + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + return r; } int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence) { - struct radeon_bo *bo; - uint32_t *msg; - int r, i; + /* we use the last page of the vcpu bo for the UVD message */ + uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - + RADEON_GPU_PAGE_SIZE; - r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); - if (r) - return r; + uint32_t *msg = rdev->uvd.cpu_addr + offs; + uint64_t addr = rdev->uvd.gpu_addr + offs; - r = radeon_bo_reserve(bo, false); - if (r) { - radeon_bo_unref(&bo); - return r; - } + int r, i; - r = radeon_bo_kmap(bo, (void **)&msg); - if (r) { - radeon_bo_unreserve(bo); - radeon_bo_unref(&bo); + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); + if (r) return r; - } /* stitch together an UVD destroy msg */ msg[0] = cpu_to_le32(0x00000de4); @@ -745,10 +743,9 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, for (i = 4; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - radeon_bo_kunmap(bo); - radeon_bo_unreserve(bo); - - return radeon_uvd_send_msg(rdev, ring, bo, fence); + r = radeon_uvd_send_msg(rdev, ring, addr, fence); + radeon_bo_unreserve(rdev->uvd.vcpu_bo); + return r; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index c7190aadbd8..9e85757d559 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -126,7 +126,8 @@ int radeon_vce_init(struct radeon_device *rdev) size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, + &rdev->vce.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 088ffdc2f57..4532cc76a0a 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[0].tv.bo = &vm->page_directory->tbo; + list[0].tv.shared = false; list[0].tiling_flags = 0; list[0].handle = 0; list_add(&list[0].tv.head, head); @@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; list[idx].tv.bo = &list[idx].robj->tbo; + list[idx].tv.shared = false; list[idx].tiling_flags = 0; list[idx].handle = 0; list_add(&list[idx++].tv.head, head); @@ -395,11 +397,12 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, memset(&tv, 0, sizeof(tv)); tv.bo = &bo->tbo; + tv.shared = false; INIT_LIST_HEAD(&head); list_add(&tv.head, &head); - r = ttm_eu_reserve_buffers(&ticket, &head); + r = ttm_eu_reserve_buffers(&ticket, &head, true); if (r) return r; @@ -424,7 +427,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, if (r) goto error; - ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); + ttm_eu_fence_buffer_objects(&ticket, &head, 
&ib.fence->base); radeon_ib_free(rdev, &ib); return 0; @@ -545,7 +548,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt); + RADEON_GEM_DOMAIN_VRAM, 0, + NULL, NULL, &pt); if (r) return r; @@ -694,8 +698,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, if (ib.length_dw != 0) { radeon_asic_vm_pad_ib(rdev, &ib); - radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); - radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); + + radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false); + radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use); WARN_ON(ib.length_dw > ndw); r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { @@ -821,7 +826,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, unsigned nptes; uint64_t pte; - radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); + radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false); if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -892,6 +897,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; + if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) + bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; + if (mem) { addr = mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_SYSTEM) { @@ -960,7 +968,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, radeon_asic_vm_pad_ib(rdev, &ib); WARN_ON(ib.length_dw > ndw); - radeon_semaphore_sync_to(ib.semaphore, vm->fence); + radeon_semaphore_sync_fence(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); @@ -1120,7 +1128,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_create(rdev, pd_size, align, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &vm->page_directory); + NULL, &vm->page_directory); if (r) return r; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index d9f5ce715c9..372016e266d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -26,7 +26,6 @@ * Jerome Glisse */ #include <linux/firmware.h> -#include <linux/platform_device.h> #include <linux/slab.h> #include <drm/drmP.h> #include "radeon.h" diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index 74426ac2bb5..7f34bad2e72 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -33,18 +33,19 @@ * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (r7xx). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int rv770_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -54,7 +55,7 @@ int rv770_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -63,10 +64,10 @@ int rv770_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -83,15 +84,15 @@ int rv770_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3a0b973e8a9..eeea5b6a177 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4684,7 +4684,7 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev, int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) { int ret = 0; - u32 idx = 0; + u32 idx = 0, i; struct radeon_cs_packet pkt; do { @@ -4695,6 +4695,12 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) switch (pkt.type) { case RADEON_PACKET_TYPE0: dev_err(rdev->dev, "Packet0 not allowed!\n"); + for (i = 0; i < ib->length_dw; i++) { + if (i == idx) + printk("\t0x%08x <---\n", ib->ptr[i]); + else + printk("\t0x%08x\n", ib->ptr[i]); + } ret = -EINVAL; break; case RADEON_PACKET_TYPE2: diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 7c22baaf94d..b58f12b762d 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -218,18 +218,19 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (SI). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *si_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes; @@ -239,7 +240,7 @@ int si_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -248,10 +249,10 @@ int si_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(rdev, sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -268,16 +269,16 @@ int si_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 70e61ffeace..9e4d5d7d348 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2916,7 +2916,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -2950,29 +2949,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } } - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - for (i = 0; i < ps->performance_level_count; i++) { - if (max_sclk_vddc) { - if (ps->performance_levels[i].sclk > max_sclk_vddc) - ps->performance_levels[i].sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->performance_levels[i].mclk > max_mclk_vddci) - ps->performance_levels[i].mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->performance_levels[i].mclk > max_mclk_vddc) - ps->performance_levels[i].mclk = max_mclk_vddc; - } - } - /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index fd414d34d88..6635da9ec98 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -736,7 +736,7 @@ # define DESCRIPTION16(x) (((x) & 0xff) << 0) # define DESCRIPTION17(x) (((x) & 0xff) << 8) -#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 +#define 
AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54 # define AUDIO_ENABLED (1 << 31) #define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56 diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index cda39134728..e72b3cb5935 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -22,6 +22,7 @@ * Authors: Christian König <christian.koenig@amd.com> */ +#include <linux/firmware.h> #include <drm/drmP.h> #include "radeon.h" #include "radeon_asic.h" @@ -70,6 +71,82 @@ void uvd_v1_0_set_wptr(struct radeon_device *rdev, } /** + * uvd_v1_0_fence_emit - emit an fence & trap command + * + * @rdev: radeon_device pointer + * @fence: fence to emit + * + * Write a fence and a trap command to the ring. + */ +void uvd_v1_0_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); + radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); + radeon_ring_write(ring, fence->seq); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); + radeon_ring_write(ring, 0); + + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); + radeon_ring_write(ring, 2); + return; +} + +/** + * uvd_v1_0_resume - memory controller programming + * + * @rdev: radeon_device pointer + * + * Let the UVD memory controller know it's offsets + */ +int uvd_v1_0_resume(struct radeon_device *rdev) +{ + uint64_t addr; + uint32_t size; + int r; + + r = radeon_uvd_resume(rdev); + if (r) + return r; + + /* programm the VCPU memory controller bits 0-27 */ + addr = (rdev->uvd.gpu_addr >> 3) + 16; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET0, addr); + WREG32(UVD_VCPU_CACHE_SIZE0, size); + + addr += size; + size = RADEON_UVD_STACK_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET1, addr); + WREG32(UVD_VCPU_CACHE_SIZE1, size); + + addr += size; + size = RADEON_UVD_HEAP_SIZE >> 3; + WREG32(UVD_VCPU_CACHE_OFFSET2, addr); + WREG32(UVD_VCPU_CACHE_SIZE2, size); + + /* bits 28-31 */ + addr = (rdev->uvd.gpu_addr >> 28) & 0xF; + WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); + + /* bits 32-39 */ + addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; + WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + + WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr)); + + return 0; +} + +/** * uvd_v1_0_init - start and test UVD block * * @rdev: radeon_device pointer @@ -130,8 +207,32 @@ done: /* lower clocks again */ radeon_set_uvd_clocks(rdev, 0, 0); - if (!r) + if (!r) { + switch (rdev->family) { + case CHIP_RV610: + case CHIP_RV630: + case CHIP_RV620: + /* 64byte granularity workaround */ + WREG32(MC_CONFIG, 0); + WREG32(MC_CONFIG, 1 << 4); + WREG32(RS_DQ_RD_RET_CONF, 0x3f); + WREG32(MC_CONFIG, 0x1f); + + /* fall through */ + case CHIP_RV670: + case CHIP_RV635: + + /* write clean workaround */ + WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10); + break; + + default: + /* TODO: Do we need more? 
*/ + break; + } + DRM_INFO("UVD initialized successfully.\n"); + } return r; } @@ -218,12 +319,12 @@ int uvd_v1_0_start(struct radeon_device *rdev) /* enable UMC */ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); + /* boot up the VCPU */ WREG32(UVD_SOFT_RESET, 0); mdelay(10); - WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); - for (i = 0; i < 10; ++i) { uint32_t status; for (j = 0; j < 100; ++j) { diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 8bfdadd5659..89193519f8a 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -72,6 +72,10 @@ int uvd_v2_2_resume(struct radeon_device *rdev) uint32_t chip_id, size; int r; + /* RV770 uses V1.0 MC */ + if (rdev->family == CHIP_RV770) + return uvd_v1_0_resume(rdev); + r = radeon_uvd_resume(rdev); if (r) return r; |