@@ -614,14 +614,16 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
+ if (!adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_is_a619_holi(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
+ if (!adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_is_a619_holi(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
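The a619_holi case can share this path even though the GPU has no real GMU: the SPTPRAC clock-control register lives in the GMU wrapper's MMIO region, and gmu_rmw() is a plain read-modify-write on that mapping. A sketch, paraphrased from a6xx_gmu.h (treat the actual header as authoritative):

    /* Read-modify-write on the (wrapper) GMU MMIO space. With mask = 1,
     * or = 0 this clears bit 0 (SP clock off); with mask = 0, or = 1 it
     * sets bit 0 (SP clock on). */
    static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
    {
        u32 val = gmu_read(gmu, reg);

        val &= ~mask;
        gmu_write(gmu, reg, val | or);
    }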
@@ -814,8 +816,8 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a618(adreno_gpu))
return;
- if (adreno_is_a619(adreno_gpu)) {
- /* HBB = 14 */
+ if (adreno_is_a619(adreno_gpu) && !adreno_is_a619_holi(adreno_gpu)) {
+ /* HBB = 14 (hbb_lo = 1) on A619; A619_holi keeps the default hbb_lo = 0 (HBB = 13) */
hbb_lo = 1;
}
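The HBB (highest bank bit) values here are encoded as an offset from 13: hbb_lo = 1 selects HBB 14 on the regular A619, while leaving hbb_lo at its default of 0 keeps HBB 13 on A619_holi. A hypothetical illustration of the encoding (the low/high split mirrors what the rest of a6xx_set_ubwc_config programs into the UBWC registers):

    u32 hbb = 14;                 /* highest bank bit for A619 */
    u32 hbb_lo = (hbb - 13) & 1;  /* 1 for HBB 14; 0 for the HBB 13 default */
    u32 hbb_hi = (hbb - 13) >> 1; /* stays 0 for HBB 13..14 */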
@@ -1029,7 +1031,12 @@ static int hw_init(struct msm_gpu *gpu)
}
/* Clear GBIF halt in case GX domain was not collapsed */
- if (a6xx_has_gbif(adreno_gpu)) {
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ } else if (a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
/* Let's make extra sure that the GPU can access the memory.. */
@@ -1038,6 +1045,9 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
/*
* Disable the trusted memory range - we don't actually support secure
* memory rendering at this point in time and we don't want to block off
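With no GMU firmware to manage the GX rail, the driver has to power up SPTPRAC (the SP/TP power rail controller) itself before touching GX-side registers, hence the a6xx_sptprac_enable() call added above. Roughly, that helper in a6xx_gmu.c does the following (paraphrased from memory, magic values included as-is; check the source for the authoritative version):

    int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
    {
        int ret;
        u32 val;

        /* Request SPTPRAC power-up through the (wrapper) GMU registers */
        gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

        /* Poll the power/clock status until the rail reports as on */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
                               (val & 0x38) == 0x28, 1, 100);
        if (ret)
            DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
                          gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));

        return ret;
    }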
@@ -1315,7 +1325,8 @@ static void a6xx_dump(struct msm_gpu *gpu)
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
#define VBIF_RESET_ACK_TIMEOUT 100
-#define VBIF_RESET_ACK_MASK 0x00f0
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
static void a6xx_recover(struct msm_gpu *gpu)
{
@@ -1372,10 +1383,16 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* Software-reset the GPU */
if (adreno_has_gmu_wrapper(adreno_gpu)) {
- /* Halt the GX side of GBIF */
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, GBIF_GX_HALT_MASK);
- spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) &
- GBIF_GX_HALT_MASK);
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
+ spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+ (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+ } else {
+ /* Halt the GX side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, GBIF_GX_HALT_MASK);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) &
+ GBIF_GX_HALT_MASK);
+ }
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
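Two details in the holi branch above: the ack is compared with == against the full mask because all four acknowledge bits (0xF0) of REG_A6XX_RBBM_VBIF_GX_RESET_STATUS must be set, not just any one of them; and spin_until() is the usual adreno busy-wait, roughly (paraphrased from adreno_gpu.h):

    /* Busy-poll the condition until ADRENO_IDLE_TIMEOUT passes; evaluates
     * to 0 on success and -ETIMEDOUT on timeout. */
    #define spin_until(X) ({ \
        int __ret = -ETIMEDOUT; \
        unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
        do { \
            if (X) { \
                __ret = 0; \
                break; \
            } \
        } while (time_before(jiffies, __t)); \
        __ret; \
    })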
@@ -1797,6 +1814,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
if (ret)
goto err;
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
err:
mutex_unlock(&a6xx_gpu->gmu.lock);
@@ -1851,6 +1871,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
mutex_lock(&a6xx_gpu->gmu.lock);
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_disable(gmu);
+
clk_disable_unprepare(gpu->ebi1_clk);
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
@@ -252,6 +252,16 @@ static inline int adreno_is_a619(struct adreno_gpu *gpu)
return gpu->revn == 619;
}
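+/*
+ * A619_holi is the GMU-wrapper (no-GMU) variant of A619, found in the
+ * SM4350 "holi" and SM6375 "blair" SoCs; the regular, full-GMU A619 is
+ * matched by adreno_is_a619() alone.
+ */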
+static inline int adreno_is_a619_holi(struct adreno_gpu *gpu)
+{
+ return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
+}
+
static inline int adreno_is_a630(struct adreno_gpu *gpu)
{
return gpu->revn == 630;