Just use the inline function directly. The macro was replaced with
the function in ebe304fa54.
Reviewed-by: Eric Engestrom <eric@engestrom.ch>
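
For reference, the inline replacement the call sites switch to looks
roughly like this; a minimal sketch modeled on Mesa's util/list.h, not
the verbatim header:

   /* Intrusive doubly-linked list node; each tracked object embeds one. */
   struct list_head {
      struct list_head *prev;
      struct list_head *next;
   };

   /* Insert "item" right after "list"; when "list" is the sentinel head,
    * this prepends, which is the same behavior LIST_ADD(item, list) had. */
   static inline void list_add(struct list_head *item, struct list_head *list)
   {
      item->prev = list;
      item->next = list->next;
      list->next->prev = item;
      list->next = item;
   }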
@@ -697,7 +697,7 @@ hud_stop_queries(struct hud_context *hud, struct pipe_context *pipe)
          if (gr->current_value <
              LIST_ENTRY(struct hud_graph, next, head)->current_value) {
             LIST_DEL(&gr->head);
-            LIST_ADD(&gr->head, &next->head);
+            list_add(&gr->head, &next->head);
          }
       }
    }
@@ -56,7 +56,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
    struct pb_slab *slab = entry->slab;
 
    LIST_DEL(&entry->head); /* remove from reclaim list */
-   LIST_ADD(&entry->head, &slab->free);
+   list_add(&entry->head, &slab->free);
    slab->num_free++;
 
    /* Add slab to the group's list if it isn't already linked. */
@@ -141,7 +141,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
          return NULL;
 
       mtx_lock(&slabs->mutex);
-      LIST_ADD(&slab->head, &group->slabs);
+      list_add(&slab->head, &group->slabs);
    }
 
    entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
@@ -393,7 +393,7 @@ tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload)
    struct threaded_query *tq = threaded_query(p->query);
 
    if (!tq->head_unflushed.next)
-      LIST_ADD(&tq->head_unflushed, &p->tc->unflushed_queries);
+      list_add(&tq->head_unflushed, &p->tc->unflushed_queries);
 
    pipe->end_query(pipe, p->query);
 }
@@ -265,7 +265,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
       return false;
 
    work->func = func;
    work->data = data;
-   LIST_ADD(&work->list, &fence->work);
+   list_add(&work->list, &fence->work);
    p_atomic_inc(&fence->work_count);
    if (fence->work_count > 64)
       nouveau_fence_kick(fence);
@@ -148,7 +148,7 @@ mm_slab_new(struct nouveau_mman *cache, int chunk_order)
    slab->order = chunk_order;
    slab->count = slab->free = size >> chunk_order;
 
-   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
+   list_add(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
 
    cache->allocated += size;
@@ -190,7 +190,7 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
       slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
 
       LIST_DEL(&slab->head);
-      LIST_ADD(&slab->head, &bucket->used);
+      list_add(&slab->head, &bucket->used);
    }
 
    *offset = mm_slab_alloc(slab) << slab->order;
@@ -203,7 +203,7 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
 
    if (slab->free == 0) {
       LIST_DEL(&slab->head);
-      LIST_ADD(&slab->head, &bucket->full);
+      list_add(&slab->head, &bucket->full);
    }
 
    alloc->next = NULL;
@@ -132,12 +132,12 @@ static void sort_cpb(struct rvce_encoder *enc)
 
    if (l1) {
       LIST_DEL(&l1->list);
-      LIST_ADD(&l1->list, &enc->cpb_slots);
+      list_add(&l1->list, &enc->cpb_slots);
    }
 
    if (l0) {
       LIST_DEL(&l0->list);
-      LIST_ADD(&l0->list, &enc->cpb_slots);
+      list_add(&l0->list, &enc->cpb_slots);
    }
 }
@@ -342,7 +342,7 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
    slot->pic_order_cnt = enc->pic.pic_order_cnt;
 
    if (!enc->pic.not_referenced) {
       LIST_DEL(&slot->list);
-      LIST_ADD(&slot->list, &enc->cpb_slots);
+      list_add(&slot->list, &enc->cpb_slots);
    }
 }
@@ -126,12 +126,12 @@ static void sort_cpb(struct rvce_encoder *enc)
 
    if (l1) {
       LIST_DEL(&l1->list);
-      LIST_ADD(&l1->list, &enc->cpb_slots);
+      list_add(&l1->list, &enc->cpb_slots);
    }
 
    if (l0) {
       LIST_DEL(&l0->list);
-      LIST_ADD(&l0->list, &enc->cpb_slots);
+      list_add(&l0->list, &enc->cpb_slots);
    }
 }
@@ -341,7 +341,7 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
    slot->pic_order_cnt = enc->pic.pic_order_cnt;
 
    if (!enc->pic.not_referenced) {
       LIST_DEL(&slot->list);
-      LIST_ADD(&slot->list, &enc->cpb_slots);
+      list_add(&slot->list, &enc->cpb_slots);
    }
 }
@@ -322,7 +322,7 @@ svga_buffer_add_host_surface(struct svga_buffer *sbuf,
    bufsurf->key = *key;
 
    /* add the surface to the surface list */
-   LIST_ADD(&bufsurf->list, &sbuf->surfaces);
+   list_add(&bufsurf->list, &sbuf->surfaces);
 
    /* Set the new bind flags for this buffer resource */
    sbuf->bind_flags = bind_flags;
@@ -139,7 +139,7 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
          LIST_DEL(&entry->head);
 
          /* Add the cache entry (but not the surface!) to the empty list */
-         LIST_ADD(&entry->head, &cache->empty);
+         list_add(&entry->head, &cache->empty);
 
          /* update the cache size */
          surf_size = surface_size(&entry->key);
@@ -194,7 +194,7 @@ svga_screen_cache_shrink(struct svga_screen *svgascreen,
 
          LIST_DEL(&entry->bucket_head);
          LIST_DEL(&entry->head);
-         LIST_ADD(&entry->head, &cache->empty);
+         list_add(&entry->head, &cache->empty);
 
          if (cache->total_size <= target_size) {
             /* all done */
@@ -294,9 +294,9 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
 
       /* If we don't have gb objects, we don't need to invalidate. */
       if (sws->have_gb_objects)
-         LIST_ADD(&entry->head, &cache->validated);
+         list_add(&entry->head, &cache->validated);
       else
-         LIST_ADD(&entry->head, &cache->invalidated);
+         list_add(&entry->head, &cache->invalidated);
 
       cache->total_size += surf_size;
    }
@@ -343,11 +343,11 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
          sws->fence_reference(sws, &entry->fence, fence);
 
          /* Add entry to the unused list */
-         LIST_ADD(&entry->head, &cache->unused);
+         list_add(&entry->head, &cache->unused);
 
          /* Add entry to the hash table bucket */
          bucket = svga_screen_cache_bucket(&entry->key);
-         LIST_ADD(&entry->bucket_head, &cache->bucket[bucket]);
+         list_add(&entry->bucket_head, &cache->bucket[bucket]);
       }
 
       curr = next;
@@ -386,7 +386,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
       }
 
       /* add the entry to the invalidated list */
-      LIST_ADD(&entry->head, &cache->invalidated);
+      list_add(&entry->head, &cache->invalidated);
    }
 
    curr = next;
@@ -1036,7 +1036,7 @@ st_framebuffer_reuse_or_create(struct st_context *st,
       }
 
       /* add to the context's winsys buffers list */
-      LIST_ADD(&cur->head, &st->winsys_buffers);
+      list_add(&cur->head, &st->winsys_buffers);
 
       st_framebuffer_reference(&stfb, cur);
    }
@@ -158,7 +158,6 @@ static inline void list_validate(const struct list_head *list)
       assert(node->next->prev == node && node->prev->next == node);
 }
 
-#define LIST_ADD(__item, __list) list_add(__item, __list)
 #define LIST_REPLACE(__from, __to) list_replace(__from, __to)
 #define LIST_DEL(__item) list_del(__item)
 #define LIST_DELINIT(__item) list_delinit(__item)
@@ -77,7 +77,7 @@ add_to_atexit_list(struct util_queue *queue)
    call_once(&atexit_once_flag, global_init);
 
    mtx_lock(&exit_mutex);
-   LIST_ADD(&queue->head, &queue_list);
+   list_add(&queue->head, &queue_list);
    mtx_unlock(&exit_mutex);
 }