|
|
|
|
|
|
|
|
 static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                    enum ring_type ring_type)
 {
-   int i;
-
    switch (ring_type) {
    case RING_DMA:
       cs->request.ip_type = AMDGPU_HW_IP_DMA;
@@ ... @@
       break;
    }

-   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
-      cs->buffer_indices_hashlist[i] = -1;
-   }
+   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
    cs->last_added_bo = NULL;

    cs->request.number_of_ibs = 1;
@@ ... @@
    cs->num_sparse_buffers = 0;
    amdgpu_fence_reference(&cs->fence, NULL);

-   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
-      cs->buffer_indices_hashlist[i] = -1;
-   }
+   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
    cs->last_added_bo = NULL;
 }
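
Both hunks make the same substitution: the per-element loop that set every buffer_indices_hashlist entry to -1 is collapsed into one memset, and the now-unused loop counter is dropped from amdgpu_init_cs_context. This works because memset stores the byte 0xff (the low byte of -1) into every byte of the array, and an all-ones bit pattern reads back as -1 for a signed two's complement integer of any width. A minimal standalone sketch of the idiom follows; the array name, element type, and size here are illustrative stand-ins, not the real definitions from struct amdgpu_cs_context:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the hashlist; the real Mesa array lives in
 * struct amdgpu_cs_context and its element type and size may differ. */
#define HASHLIST_SIZE 4096
static int16_t buffer_indices_hashlist[HASHLIST_SIZE];

int main(void)
{
   /* memset writes the byte 0xff into every byte, so each int16_t element
    * becomes 0xffff, i.e. -1 in two's complement. */
   memset(buffer_indices_hashlist, -1, sizeof(buffer_indices_hashlist));

   for (int i = 0; i < HASHLIST_SIZE; i++)
      assert(buffer_indices_hashlist[i] == -1);

   return 0;
}

Note that the loop and the memset are only interchangeable because the fill value is -1: for multi-byte elements, memset can only produce values whose bytes are all identical, which in practice means 0 or -1 for signed integers.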
|
|
|
|
|
|