Just use the inlined function directly. The macro was replaced with
the function in ebe304fa54.
Reviewed-by: Eric Engestrom <eric@engestrom.ch>
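For reference, a minimal sketch of what the inlined function does; the
authoritative definition lives in src/util/list.h, this is just the classic
tail insert into a circular doubly linked list with a sentinel head:

    struct list_head {
       struct list_head *prev;
       struct list_head *next;
    };

    /* Sketch of list_addtail(); see src/util/list.h for the real
     * definition. Inserting item just before the sentinel head of a
     * circular list is equivalent to appending at the tail. */
    static inline void list_addtail(struct list_head *item, struct list_head *list)
    {
       item->next = list;
       item->prev = list->prev;
       list->prev->next = item;   /* old tail now points at item */
       list->prev = item;         /* item becomes the new tail */
    }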
@@ -291,7 +291,7 @@ static void radv_amdgpu_add_buffer_to_global_list(struct radv_amdgpu_winsys_bo *
    if (bo->ws->debug_all_bos) {
       pthread_mutex_lock(&ws->global_bo_list_lock);
-      LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
+      list_addtail(&bo->global_list_item, &ws->global_bo_list);
       ws->num_buffers++;
       pthread_mutex_unlock(&ws->global_bo_list_lock);
    }
@@ -946,7 +946,7 @@ hud_pane_add_graph(struct hud_pane *pane, struct hud_graph *gr)
    gr->color[1] = colors[color][1];
    gr->color[2] = colors[color][2];
    gr->pane = pane;
-   LIST_ADDTAIL(&gr->head, &pane->graph_list);
+   list_addtail(&gr->head, &pane->graph_list);
    pane->num_graphs++;
    pane->next_color++;
 }
@@ -1458,7 +1458,7 @@ hud_parse_env_var(struct hud_context *hud, struct pipe_screen *screen,
          height = 100;
          if (pane && pane->num_graphs) {
-            LIST_ADDTAIL(&pane->head, &hud->pane_list);
+            list_addtail(&pane->head, &hud->pane_list);
             pane = NULL;
          }
          break;
@@ -1471,7 +1471,7 @@ hud_parse_env_var(struct hud_context *hud, struct pipe_screen *screen,
          height = 100;
          if (pane && pane->num_graphs) {
-            LIST_ADDTAIL(&pane->head, &hud->pane_list);
+            list_addtail(&pane->head, &hud->pane_list);
             pane = NULL;
          }
@@ -1494,7 +1494,7 @@ hud_parse_env_var(struct hud_context *hud, struct pipe_screen *screen,
    if (pane) {
       if (pane->num_graphs) {
-         LIST_ADDTAIL(&pane->head, &hud->pane_list);
+         list_addtail(&pane->head, &hud->pane_list);
       }
       else {
         FREE(pane);
@@ -279,7 +279,7 @@ fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
    LIST_DEL(&fenced_buf->head);
    assert(fenced_mgr->num_unfenced);
    --fenced_mgr->num_unfenced;
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
    ++fenced_mgr->num_fenced;
 }
@@ -309,7 +309,7 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
    assert(fenced_mgr->num_fenced);
    --fenced_mgr->num_fenced;
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
    if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
@@ -939,7 +939,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
    assert(fenced_buf->buffer || fenced_buf->data);
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
    mtx_unlock(&fenced_mgr->mutex);
@@ -421,7 +421,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    (void) mtx_init(&buf->mutex, mtx_plain);
    mtx_lock(&mgr->mutex);
-   LIST_ADDTAIL(&buf->head, &mgr->list);
+   list_addtail(&buf->head, &mgr->list);
    mtx_unlock(&mgr->mutex);
    return &buf->base;
@@ -205,11 +205,11 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
    buf->mapCount = 0;
    LIST_DEL(list);
-   LIST_ADDTAIL(list, &slab->freeBuffers);
+   list_addtail(list, &slab->freeBuffers);
    slab->numFree++;
    if (slab->head.next == &slab->head)
-      LIST_ADDTAIL(&slab->head, &mgr->slabs);
+      list_addtail(&slab->head, &mgr->slabs);
    /* If the slab becomes totally empty, free it */
    if (slab->numFree == slab->numBuffers) {
@@ -350,13 +350,13 @@ pb_slab_create(struct pb_slab_manager *mgr)
       buf->start = i* mgr->bufSize;
       buf->mapCount = 0;
       cnd_init(&buf->event);
-      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+      list_addtail(&buf->head, &slab->freeBuffers);
       slab->numFree++;
       buf++;
    }
    /* Add this slab to the list of partial slabs */
-   LIST_ADDTAIL(&slab->head, &mgr->slabs);
+   list_addtail(&slab->head, &mgr->slabs);
    return PIPE_OK;
@@ -104,7 +104,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
    entry->start = os_time_get();
    entry->end = entry->start + mgr->usecs;
-   LIST_ADDTAIL(&entry->head, cache);
+   list_addtail(&entry->head, cache);
    ++mgr->num_buffers;
    mgr->cache_size += buf->size;
    mtx_unlock(&mgr->mutex);
@@ -62,7 +62,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
    /* Add slab to the group's list if it isn't already linked. */
    if (!slab->head.next) {
       struct pb_slab_group *group = &slabs->groups[entry->group_index];
-      LIST_ADDTAIL(&slab->head, &group->slabs);
+      list_addtail(&slab->head, &group->slabs);
    }
    if (slab->num_free >= slab->num_entries) {
@@ -163,7 +163,7 @@ void
 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
 {
    mtx_lock(&slabs->mutex);
-   LIST_ADDTAIL(&entry->head, &slabs->reclaim);
+   list_addtail(&entry->head, &slabs->reclaim);
    mtx_unlock(&slabs->mutex);
 }
@@ -155,7 +155,7 @@ debug_malloc(const char *file, unsigned line, const char *function,
    ftr->magic = DEBUG_MEMORY_MAGIC;
    mtx_lock(&list_mutex);
-   LIST_ADDTAIL(&hdr->head, &list);
+   list_addtail(&hdr->head, &list);
    mtx_unlock(&list_mutex);
    return data_from_header(hdr);
@@ -106,7 +106,7 @@ static inline void
 util_dirty_surface_set_dirty(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
 {
    if(LIST_IS_EMPTY(&ds->dirty_list))
-      LIST_ADDTAIL(&ds->dirty_list, &dss->dirty_list);
+      list_addtail(&ds->dirty_list, &dss->dirty_list);
 }

 static inline void
@@ -223,11 +223,11 @@ nouveau_mm_free(struct nouveau_mm_allocation *alloc)
    if (slab->free == slab->count) {
       LIST_DEL(&slab->head);
-      LIST_ADDTAIL(&slab->head, &bucket->free);
+      list_addtail(&slab->head, &bucket->free);
    } else
    if (slab->free == 1) {
       LIST_DEL(&slab->head);
-      LIST_ADDTAIL(&slab->head, &bucket->used);
+      list_addtail(&slab->head, &bucket->used);
    }
    FREE(alloc);
@@ -80,7 +80,7 @@ nv30_query_object_new(struct nv30_screen *screen)
       nv30_query_object_del(screen, &oq);
    }
-   LIST_ADDTAIL(&qo->list, &screen->queries);
+   list_addtail(&qo->list, &screen->queries);
    ntfy = nv30_ntfy(screen, qo);
    ntfy[0] = 0x00000000;
@@ -698,7 +698,7 @@ texture_buffer_sampler_view(struct r600_context *rctx,
    view->tex_resource = &tmp->resource;
    if (tmp->resource.gpu_address)
-      LIST_ADDTAIL(&view->list, &rctx->texture_buffers);
+      list_addtail(&view->list, &rctx->texture_buffers);
    return &view->base;
 }
@@ -167,7 +167,7 @@ int r600_bytecode_add_cf(struct r600_bytecode *bc)
    if (!cf)
       return -ENOMEM;
-   LIST_ADDTAIL(&cf->list, &bc->cf);
+   list_addtail(&cf->list, &bc->cf);
    if (bc->cf_last) {
       cf->id = bc->cf_last->id + 2;
       if (bc->cf_last->eg_alu_extended) {
@@ -930,7 +930,7 @@ static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu
       if (result[i]) {
          LIST_DEL(&result[i]->list);
          result[i]->last = 0;
-         LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
+         list_addtail(&result[i]->list, &bc->cf_last->alu);
       }
    }
@@ -1266,7 +1266,7 @@ int r600_bytecode_add_alu_type(struct r600_bytecode *bc,
    if (nalu->dst.sel >= bc->ngpr) {
       bc->ngpr = nalu->dst.sel + 1;
    }
-   LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
+   list_addtail(&nalu->list, &bc->cf_last->alu);
    /* each alu use 2 dwords */
    bc->cf_last->ndw += 2;
    bc->ndw += 2;
@@ -1407,7 +1407,7 @@ static int r600_bytecode_add_vtx_internal(struct r600_bytecode *bc, const struct
          return -EINVAL;
       }
    }
-   LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
+   list_addtail(&nvtx->list, &bc->cf_last->vtx);
    /* each fetch use 4 dwords */
    bc->cf_last->ndw += 4;
    bc->ndw += 4;
@@ -1477,7 +1477,7 @@ int r600_bytecode_add_tex(struct r600_bytecode *bc, const struct r600_bytecode_t
    if (ntex->dst_gpr >= bc->ngpr) {
       bc->ngpr = ntex->dst_gpr + 1;
    }
-   LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
+   list_addtail(&ntex->list, &bc->cf_last->tex);
    /* each texture fetch use 4 dwords */
    bc->cf_last->ndw += 4;
    bc->ndw += 4;
@@ -1511,7 +1511,7 @@ int r600_bytecode_add_gds(struct r600_bytecode *bc, const struct r600_bytecode_g
       bc->cf_last->op = CF_OP_GDS;
    }
-   LIST_ADDTAIL(&ngds->list, &bc->cf_last->gds);
+   list_addtail(&ngds->list, &bc->cf_last->gds);
    bc->cf_last->ndw += 4; /* each GDS uses 4 dwords */
    if ((bc->cf_last->ndw / 4) >= r600_bytecode_num_tex_and_vtx_instructions(bc))
       bc->force_add_cf = 1;
@@ -1047,7 +1047,7 @@ bool r600_query_hw_begin(struct r600_common_context *rctx,
    if (!query->buffer.buf)
       return false;
-   LIST_ADDTAIL(&query->list, &rctx->active_queries);
+   list_addtail(&query->list, &rctx->active_queries);
    return true;
 }
@@ -104,7 +104,7 @@ static void reset_cpb(struct rvce_encoder *enc)
       slot->picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
       slot->frame_num = 0;
       slot->pic_order_cnt = 0;
-      LIST_ADDTAIL(&slot->list, &enc->cpb_slots);
+      list_addtail(&slot->list, &enc->cpb_slots);
    }
 }
@@ -98,7 +98,7 @@ static void reset_cpb(struct rvce_encoder *enc)
       slot->picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
       slot->frame_num = 0;
       slot->pic_order_cnt = 0;
-      LIST_ADDTAIL(&slot->list, &enc->cpb_slots);
+      list_addtail(&slot->list, &enc->cpb_slots);
    }
 }
@@ -170,7 +170,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
          results[32 * i + 16] = 0;
       }
-   LIST_ADDTAIL(&qbuf->list, &sctx->shader_query_buffers);
+   list_addtail(&qbuf->list, &sctx->shader_query_buffers);
    qbuf->head = 0;
    qbuf->refcount = sctx->num_active_shader_queries;
@@ -841,7 +841,7 @@ static bool si_pc_query_begin(struct si_context *ctx, struct si_query *squery)
    si_query_buffer_reset(ctx, &query->buffer);
-   LIST_ADDTAIL(&query->b.active_list, &ctx->active_queries);
+   list_addtail(&query->b.active_list, &ctx->active_queries);
    ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
    si_pc_query_resume(ctx, squery);
@@ -1147,7 +1147,7 @@ bool si_query_hw_begin(struct si_context *sctx,
    if (!query->buffer.buf)
       return false;
-   LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
+   list_addtail(&query->b.active_list, &sctx->active_queries);
    sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
    return true;
 }
@@ -1065,7 +1065,7 @@ svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
       if (ret == PIPE_OK) {
          sbuf->dma.pending = TRUE;
          assert(!sbuf->head.prev && !sbuf->head.next);
-         LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
+         list_addtail(&sbuf->head, &svga->dirty_buffers);
       }
    }
    else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
@@ -446,7 +446,7 @@ svga_screen_cache_init(struct svga_screen *svgascreen)
    list_inithead(&cache->empty);
    for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
-      LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
+      list_addtail(&cache->entries[i].head, &cache->empty);
    return PIPE_OK;
 }
@@ -736,7 +736,7 @@ static void vid_dec_h265_EndFrame(vid_dec_PrivateType *priv)
    entry->timestamp = priv->timestamp;
    entry->poc = get_poc(priv);
-   LIST_ADDTAIL(&entry->list, &priv->codec_data.h265.dpb_list);
+   list_addtail(&entry->list, &priv->codec_data.h265.dpb_list);
    ++priv->codec_data.h265.dpb_num;
    priv->target = NULL;
@@ -829,7 +829,7 @@ static void enc_ClearBframes(omx_base_PortType *port, struct input_buf_private *
    /* promote last from to P frame */
    priv->ref_idx_l0 = priv->ref_idx_l1;
    enc_HandleTask(port, task, PIPE_H264_ENC_PICTURE_TYPE_P);
-   LIST_ADDTAIL(&task->list, &inp->tasks);
+   list_addtail(&task->list, &inp->tasks);
    priv->ref_idx_l1 = priv->frame_num++;
    /* handle B frames */
@@ -900,12 +900,12 @@ static OMX_ERRORTYPE vid_enc_EncodeFrame(omx_base_PortType *port, OMX_BUFFERHEAD
    if (picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) {
       /* put frame at the tail of the queue */
-      LIST_ADDTAIL(&task->list, &priv->b_frames);
+      list_addtail(&task->list, &priv->b_frames);
    } else {
       /* handle I or P frame */
       priv->ref_idx_l0 = priv->ref_idx_l1;
       enc_HandleTask(port, task, picture_type);
-      LIST_ADDTAIL(&task->list, &priv->stacked_tasks);
+      list_addtail(&task->list, &priv->stacked_tasks);
       LIST_FOR_EACH_ENTRY(task, &priv->stacked_tasks, list) {
          ++stacked_num;
       }
@@ -913,7 +913,7 @@ static OMX_ERRORTYPE vid_enc_EncodeFrame(omx_base_PortType *port, OMX_BUFFERHEAD
       struct encode_task *t;
       t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
       LIST_DEL(&t->list);
-      LIST_ADDTAIL(&t->list, &inp->tasks);
+      list_addtail(&t->list, &inp->tasks);
    }
    priv->ref_idx_l1 = priv->frame_num++;
@@ -277,7 +277,7 @@ static void enc_ClearBframes(vid_enc_PrivateType * priv, struct input_buf_privat
    /* promote last from to P frame */
    priv->ref_idx_l0 = priv->ref_idx_l1;
    enc_HandleTask(priv, task, PIPE_H264_ENC_PICTURE_TYPE_P);
-   LIST_ADDTAIL(&task->list, &inp->tasks);
+   list_addtail(&task->list, &inp->tasks);
    priv->ref_idx_l1 = priv->frame_num++;
    /* handle B frames */
@@ -354,12 +354,12 @@ static OMX_ERRORTYPE encode_frame(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
    if (picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) {
       /* put frame at the tail of the queue */
-      LIST_ADDTAIL(&task->list, &priv->b_frames);
+      list_addtail(&task->list, &priv->b_frames);
    } else {
       /* handle I or P frame */
       priv->ref_idx_l0 = priv->ref_idx_l1;
       enc_HandleTask(priv, task, picture_type);
-      LIST_ADDTAIL(&task->list, &priv->stacked_tasks);
+      list_addtail(&task->list, &priv->stacked_tasks);
       LIST_FOR_EACH_ENTRY(task, &priv->stacked_tasks, list) {
          ++stacked_num;
       }
@@ -367,7 +367,7 @@ static OMX_ERRORTYPE encode_frame(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
       struct encode_task *t;
       t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
       LIST_DEL(&t->list);
-      LIST_ADDTAIL(&t->list, &inp->tasks);
+      list_addtail(&t->list, &inp->tasks);
    }
    priv->ref_idx_l1 = priv->frame_num++;
@@ -136,7 +136,7 @@ void vid_dec_h264_EndFrame(vid_dec_PrivateType *priv)
    entry->buffer = priv->target;
    entry->timestamp = priv->timestamp;
    entry->poc = MIN2(priv->picture.h264.field_order_cnt[0], priv->picture.h264.field_order_cnt[1]);
-   LIST_ADDTAIL(&entry->list, &priv->codec_data.h264.dpb_list);
+   list_addtail(&entry->list, &priv->codec_data.h264.dpb_list);
    ++priv->codec_data.h264.dpb_num;
    priv->target = NULL;
    priv->picture.h264.field_order_cnt[0] = priv->picture.h264.field_order_cnt[1] = INT_MAX;
@@ -139,7 +139,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
    task = LIST_ENTRY(struct encode_task, inp->tasks.next, list);
    LIST_DEL(&task->list);
-   LIST_ADDTAIL(&task->list, &priv->used_tasks);
+   list_addtail(&task->list, &priv->used_tasks);
    if (!task->bitstream)
       return;
@@ -414,7 +414,7 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
    if (ws->debug_all_bos) {
       simple_mtx_lock(&ws->global_bo_list_lock);
-      LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
+      list_addtail(&bo->u.real.global_list_item, &ws->global_bo_list);
       ws->num_buffers++;
      simple_mtx_unlock(&ws->global_bo_list_lock);
    }
@@ -692,7 +692,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
         assert(bo->u.slab.real->bo);
      }
-      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
+      list_addtail(&bo->u.slab.entry.head, &slab->base.free);
    }
    return &slab->base;
@@ -815,7 +815,7 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
       bo->u.slab.entry.group_index = group_index;
       bo->u.slab.real = slab->buffer;
-      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
+      list_addtail(&bo->u.slab.entry.head, &slab->base.free);
    }
    return &slab->base;
@@ -242,7 +242,7 @@ fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
    LIST_DEL(&fenced_buf->head);
    assert(fenced_mgr->num_unfenced);
    --fenced_mgr->num_unfenced;
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
    ++fenced_mgr->num_fenced;
 }
@@ -272,7 +272,7 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
    assert(fenced_mgr->num_fenced);
    --fenced_mgr->num_fenced;
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
    if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
@@ -756,7 +756,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
    assert(fenced_buf->buffer);
-   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
    mtx_unlock(&fenced_mgr->mutex);
@@ -218,7 +218,7 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
       list_inithead(&fence->ops_list);
    } else {
       p_atomic_set(&fence->signalled, 0);
-      LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
+      list_addtail(&fence->ops_list, &ops->not_signaled);
    }
    mtx_unlock(&ops->mutex);
@@ -91,7 +91,7 @@ virgl_resource_cache_add(struct virgl_resource_cache *cache,
    entry->timeout_start = now;
    entry->timeout_end = entry->timeout_start + cache->timeout_usecs;
-   LIST_ADDTAIL(&entry->head, &cache->resources);
+   list_addtail(&entry->head, &cache->resources);
 }

 struct virgl_resource_cache_entry *
@@ -307,7 +307,7 @@ st_save_zombie_sampler_view(struct st_context *st,
     * while free_zombie_resource_views() is called from another.
     */
    simple_mtx_lock(&st->zombie_sampler_views.mutex);
-   LIST_ADDTAIL(&entry->node, &st->zombie_sampler_views.list.node);
+   list_addtail(&entry->node, &st->zombie_sampler_views.list.node);
    simple_mtx_unlock(&st->zombie_sampler_views.mutex);
 }
@@ -340,7 +340,7 @@ st_save_zombie_shader(struct st_context *st,
     * while free_zombie_shaders() is called from another.
     */
    simple_mtx_lock(&st->zombie_shaders.mutex);
-   LIST_ADDTAIL(&entry->node, &st->zombie_shaders.list.node);
+   list_addtail(&entry->node, &st->zombie_shaders.list.node);
    simple_mtx_unlock(&st->zombie_shaders.mutex);
 }
@@ -159,7 +159,6 @@ static inline void list_validate(const struct list_head *list)
 }

 #define LIST_ADD(__item, __list) list_add(__item, __list)
-#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
 #define LIST_REPLACE(__from, __to) list_replace(__from, __to)
 #define LIST_DEL(__item) list_del(__item)
 #define LIST_DELINIT(__item) list_delinit(__item)