Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com> Acked-by: Ilia Mirkin <imirkin@alum.mit.edu>tags/11.0-branchpoint
| @@ -151,10 +151,10 @@ struct nv50_ir_prog_info | |||
| } gp; | |||
| struct { | |||
| unsigned numColourResults; | |||
| boolean writesDepth; | |||
| boolean earlyFragTests; | |||
| boolean separateFragData; | |||
| boolean usesDiscard; | |||
| bool writesDepth; | |||
| bool earlyFragTests; | |||
| bool separateFragData; | |||
| bool usesDiscard; | |||
| } fp; | |||
| struct { | |||
| uint32_t inputOffset; /* base address for user args */ | |||
| @@ -180,11 +180,11 @@ struct nv50_ir_prog_info | |||
| int8_t viewportId; /* output index of ViewportIndex */ | |||
| uint8_t fragDepth; /* output index of FragDepth */ | |||
| uint8_t sampleMask; /* output index of SampleMask */ | |||
| boolean sampleInterp; /* perform sample interp on all fp inputs */ | |||
| bool sampleInterp; /* perform sample interp on all fp inputs */ | |||
| uint8_t backFaceColor[2]; /* input/output indices of back face colour */ | |||
| uint8_t globalAccess; /* 1 for read, 2 for wr, 3 for rw */ | |||
| boolean fp64; /* program uses fp64 math */ | |||
| boolean nv50styleSurfaces; /* generate gX[] access for raw buffers */ | |||
| bool fp64; /* program uses fp64 math */ | |||
| bool nv50styleSurfaces; /* generate gX[] access for raw buffers */ | |||
| uint8_t resInfoCBSlot; /* cX[] used for tex handles, surface info */ | |||
| uint16_t texBindBase; /* base address for tex handles (nve4) */ | |||
| uint16_t suInfoBase; /* base address for surface info (nve4) */ | |||
| @@ -826,7 +826,7 @@ Source::Source(struct nv50_ir_prog_info *prog) : info(prog) | |||
| if (prog->dbgFlags & NV50_IR_DEBUG_BASIC) | |||
| tgsi_dump(tokens, 0); | |||
| mainTempsInLMem = FALSE; | |||
| mainTempsInLMem = false; | |||
| } | |||
| Source::~Source() | |||
| @@ -937,7 +937,7 @@ void Source::scanProperty(const struct tgsi_full_property *prop) | |||
| info->prop.gp.instanceCount = prop->u[0].Data; | |||
| break; | |||
| case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS: | |||
| info->prop.fp.separateFragData = TRUE; | |||
| info->prop.fp.separateFragData = true; | |||
| break; | |||
| case TGSI_PROPERTY_FS_COORD_ORIGIN: | |||
| case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER: | |||
| @@ -1155,7 +1155,7 @@ bool Source::scanInstruction(const struct tgsi_full_instruction *inst) | |||
| } else | |||
| if (insn.getDst(0).getFile() == TGSI_FILE_TEMPORARY) { | |||
| if (insn.getDst(0).isIndirect(0)) | |||
| mainTempsInLMem = TRUE; | |||
| mainTempsInLMem = true; | |||
| } | |||
| } | |||
| @@ -1163,7 +1163,7 @@ bool Source::scanInstruction(const struct tgsi_full_instruction *inst) | |||
| Instruction::SrcRegister src = insn.getSrc(s); | |||
| if (src.getFile() == TGSI_FILE_TEMPORARY) { | |||
| if (src.isIndirect(0)) | |||
| mainTempsInLMem = TRUE; | |||
| mainTempsInLMem = true; | |||
| } else | |||
| if (src.getFile() == TGSI_FILE_RESOURCE) { | |||
| if (src.getIndex(0) == TGSI_RESOURCE_GLOBAL) | |||
| @@ -176,7 +176,7 @@ GM107LoweringPass::handlePOPCNT(Instruction *i) | |||
| i->getSrc(0), i->getSrc(1)); | |||
| i->setSrc(0, tmp); | |||
| i->setSrc(1, NULL); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| // | |||
| @@ -28,7 +28,7 @@ nouveau_transfer(struct pipe_transfer *transfer) | |||
| return (struct nouveau_transfer *)transfer; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_malloc(struct nv04_resource *buf) | |||
| { | |||
| if (!buf->data) | |||
| @@ -36,7 +36,7 @@ nouveau_buffer_malloc(struct nv04_resource *buf) | |||
| return !!buf->data; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_allocate(struct nouveau_screen *screen, | |||
| struct nv04_resource *buf, unsigned domain) | |||
| { | |||
| @@ -53,12 +53,12 @@ nouveau_buffer_allocate(struct nouveau_screen *screen, | |||
| buf->mm = nouveau_mm_allocate(screen->mm_GART, size, | |||
| &buf->bo, &buf->offset); | |||
| if (!buf->bo) | |||
| return FALSE; | |||
| return false; | |||
| NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0); | |||
| } else { | |||
| assert(domain == 0); | |||
| if (!nouveau_buffer_malloc(buf)) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| buf->domain = domain; | |||
| if (buf->bo) | |||
| @@ -66,7 +66,7 @@ nouveau_buffer_allocate(struct nouveau_screen *screen, | |||
| util_range_set_empty(&buf->valid_buffer_range); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static INLINE void | |||
| @@ -93,7 +93,7 @@ nouveau_buffer_release_gpu_storage(struct nv04_resource *buf) | |||
| buf->domain = 0; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_reallocate(struct nouveau_screen *screen, | |||
| struct nv04_resource *buf, unsigned domain) | |||
| { | |||
| @@ -134,13 +134,13 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen, | |||
| */ | |||
| static uint8_t * | |||
| nouveau_transfer_staging(struct nouveau_context *nv, | |||
| struct nouveau_transfer *tx, boolean permit_pb) | |||
| struct nouveau_transfer *tx, bool permit_pb) | |||
| { | |||
| const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK; | |||
| const unsigned size = align(tx->base.box.width, 4) + adj; | |||
| if (!nv->push_data) | |||
| permit_pb = FALSE; | |||
| permit_pb = false; | |||
| if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) { | |||
| tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN); | |||
| @@ -162,7 +162,7 @@ nouveau_transfer_staging(struct nouveau_context *nv, | |||
| * buffer. Also updates buf->data if present. | |||
| * | |||
| * Maybe just migrate to GART right away if we actually need to do this. */ | |||
| static boolean | |||
| static bool | |||
| nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx) | |||
| { | |||
| struct nv04_resource *buf = nv04_resource(tx->base.resource); | |||
| @@ -175,12 +175,12 @@ nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx) | |||
| buf->bo, buf->offset + base, buf->domain, size); | |||
| if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client)) | |||
| return FALSE; | |||
| return false; | |||
| if (buf->data) | |||
| memcpy(buf->data + base, tx->map, size); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -190,7 +190,7 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, | |||
| struct nv04_resource *buf = nv04_resource(tx->base.resource); | |||
| uint8_t *data = tx->map + offset; | |||
| const unsigned base = tx->base.box.x + offset; | |||
| const boolean can_cb = !((base | size) & 3); | |||
| const bool can_cb = !((base | size) & 3); | |||
| if (buf->data) | |||
| memcpy(data, buf->data + base, size); | |||
| @@ -219,32 +219,32 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, | |||
| /* Does a CPU wait for the buffer's backing data to become reliably accessible | |||
| * for write/read by waiting on the buffer's relevant fences. | |||
| */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw) | |||
| { | |||
| if (rw == PIPE_TRANSFER_READ) { | |||
| if (!buf->fence_wr) | |||
| return TRUE; | |||
| return true; | |||
| NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, | |||
| !nouveau_fence_signalled(buf->fence_wr)); | |||
| if (!nouveau_fence_wait(buf->fence_wr)) | |||
| return FALSE; | |||
| return false; | |||
| } else { | |||
| if (!buf->fence) | |||
| return TRUE; | |||
| return true; | |||
| NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, | |||
| !nouveau_fence_signalled(buf->fence)); | |||
| if (!nouveau_fence_wait(buf->fence)) | |||
| return FALSE; | |||
| return false; | |||
| nouveau_fence_ref(NULL, &buf->fence); | |||
| } | |||
| nouveau_fence_ref(NULL, &buf->fence_wr); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw) | |||
| { | |||
| if (rw == PIPE_TRANSFER_READ) | |||
| @@ -292,11 +292,11 @@ nouveau_buffer_transfer_del(struct nouveau_context *nv, | |||
| } | |||
| /* Creates a cache in system memory of the buffer data. */ | |||
| static boolean | |||
| static bool | |||
| nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) | |||
| { | |||
| struct nouveau_transfer tx; | |||
| boolean ret; | |||
| bool ret; | |||
| tx.base.resource = &buf->base; | |||
| tx.base.box.x = 0; | |||
| tx.base.box.width = buf->base.width0; | |||
| @@ -305,13 +305,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) | |||
| if (!buf->data) | |||
| if (!nouveau_buffer_malloc(buf)) | |||
| return FALSE; | |||
| return false; | |||
| if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY)) | |||
| return TRUE; | |||
| return true; | |||
| nv->stats.buf_cache_count++; | |||
| if (!nouveau_transfer_staging(nv, &tx, FALSE)) | |||
| return FALSE; | |||
| if (!nouveau_transfer_staging(nv, &tx, false)) | |||
| return false; | |||
| ret = nouveau_transfer_read(nv, &tx); | |||
| if (ret) { | |||
| @@ -330,15 +330,15 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) | |||
| * resource. This can be useful if we would otherwise have to wait for a read | |||
| * operation to complete on this data. | |||
| */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage) | |||
| { | |||
| if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) | |||
| return FALSE; | |||
| return false; | |||
| if (unlikely(buf->base.bind & PIPE_BIND_SHARED)) | |||
| return FALSE; | |||
| return false; | |||
| if (unlikely(usage & PIPE_TRANSFER_PERSISTENT)) | |||
| return FALSE; | |||
| return false; | |||
| return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE); | |||
| } | |||
| @@ -408,7 +408,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, | |||
| * back into VRAM on unmap. */ | |||
| if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) | |||
| buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK; | |||
| nouveau_transfer_staging(nv, tx, TRUE); | |||
| nouveau_transfer_staging(nv, tx, true); | |||
| } else { | |||
| if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { | |||
| /* The GPU is currently writing to this buffer. Copy its current | |||
| @@ -419,13 +419,13 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, | |||
| align_free(buf->data); | |||
| buf->data = NULL; | |||
| } | |||
| nouveau_transfer_staging(nv, tx, FALSE); | |||
| nouveau_transfer_staging(nv, tx, false); | |||
| nouveau_transfer_read(nv, tx); | |||
| } else { | |||
| /* The buffer is currently idle. Create a staging area for writes, | |||
| * and make sure that the cached data is up-to-date. */ | |||
| if (usage & PIPE_TRANSFER_WRITE) | |||
| nouveau_transfer_staging(nv, tx, TRUE); | |||
| nouveau_transfer_staging(nv, tx, true); | |||
| if (!buf->data) | |||
| nouveau_buffer_cache(nv, buf); | |||
| } | |||
| @@ -477,7 +477,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, | |||
| if (usage & PIPE_TRANSFER_DISCARD_RANGE) { | |||
| /* The whole range is being discarded, so it doesn't matter what was | |||
| * there before. No need to copy anything over. */ | |||
| nouveau_transfer_staging(nv, tx, TRUE); | |||
| nouveau_transfer_staging(nv, tx, true); | |||
| map = tx->map; | |||
| } else | |||
| if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) { | |||
| @@ -488,7 +488,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, | |||
| } else { | |||
| /* It is expected that the returned buffer be a representation of the | |||
| * data in question, so we must copy it over from the buffer. */ | |||
| nouveau_transfer_staging(nv, tx, TRUE); | |||
| nouveau_transfer_staging(nv, tx, true); | |||
| if (tx->map) | |||
| memcpy(tx->map, map, box->width); | |||
| map = tx->map; | |||
| @@ -539,7 +539,7 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe, | |||
| const uint8_t bind = buf->base.bind; | |||
| /* make sure we invalidate dedicated caches */ | |||
| if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) | |||
| nv->vbo_dirty = TRUE; | |||
| nv->vbo_dirty = true; | |||
| } | |||
| util_range_add(&buf->valid_buffer_range, | |||
| @@ -634,7 +634,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen, | |||
| { | |||
| struct nouveau_screen *screen = nouveau_screen(pscreen); | |||
| struct nv04_resource *buffer; | |||
| boolean ret; | |||
| bool ret; | |||
| buffer = CALLOC_STRUCT(nv04_resource); | |||
| if (!buffer) | |||
| @@ -678,7 +678,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen, | |||
| } | |||
| ret = nouveau_buffer_allocate(screen, buffer, buffer->domain); | |||
| if (ret == FALSE) | |||
| if (ret == false) | |||
| goto fail; | |||
| if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy) | |||
| @@ -725,20 +725,20 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr, | |||
| return &buffer->base; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf, | |||
| struct nouveau_bo *bo, unsigned offset, unsigned size) | |||
| { | |||
| if (!nouveau_buffer_malloc(buf)) | |||
| return FALSE; | |||
| return false; | |||
| if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client)) | |||
| return FALSE; | |||
| return false; | |||
| memcpy(buf->data, (uint8_t *)bo->map + offset, size); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| /* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */ | |||
| boolean | |||
| bool | |||
| nouveau_buffer_migrate(struct nouveau_context *nv, | |||
| struct nv04_resource *buf, const unsigned new_domain) | |||
| { | |||
| @@ -753,7 +753,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv, | |||
| if (new_domain == NOUVEAU_BO_GART && old_domain == 0) { | |||
| if (!nouveau_buffer_allocate(screen, buf, new_domain)) | |||
| return FALSE; | |||
| return false; | |||
| ret = nouveau_bo_map(buf->bo, 0, nv->client); | |||
| if (ret) | |||
| return ret; | |||
| @@ -766,7 +766,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv, | |||
| if (new_domain == NOUVEAU_BO_VRAM) { | |||
| /* keep a system memory copy of our data in case we hit a fallback */ | |||
| if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size)) | |||
| return FALSE; | |||
| return false; | |||
| if (nouveau_mesa_debug) | |||
| debug_printf("migrating %u KiB to VRAM\n", size / 1024); | |||
| } | |||
| @@ -787,28 +787,28 @@ nouveau_buffer_migrate(struct nouveau_context *nv, | |||
| if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) { | |||
| struct nouveau_transfer tx; | |||
| if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM)) | |||
| return FALSE; | |||
| return false; | |||
| tx.base.resource = &buf->base; | |||
| tx.base.box.x = 0; | |||
| tx.base.box.width = buf->base.width0; | |||
| tx.bo = NULL; | |||
| tx.map = NULL; | |||
| if (!nouveau_transfer_staging(nv, &tx, FALSE)) | |||
| return FALSE; | |||
| if (!nouveau_transfer_staging(nv, &tx, false)) | |||
| return false; | |||
| nouveau_transfer_write(nv, &tx, 0, tx.base.box.width); | |||
| nouveau_buffer_transfer_del(nv, &tx); | |||
| } else | |||
| return FALSE; | |||
| return false; | |||
| assert(buf->domain == new_domain); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| /* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART. | |||
| * We'd like to only allocate @size bytes here, but then we'd have to rebase | |||
| * the vertex indices ... | |||
| */ | |||
| boolean | |||
| bool | |||
| nouveau_user_buffer_upload(struct nouveau_context *nv, | |||
| struct nv04_resource *buf, | |||
| unsigned base, unsigned size) | |||
| @@ -820,14 +820,14 @@ nouveau_user_buffer_upload(struct nouveau_context *nv, | |||
| buf->base.width0 = base + size; | |||
| if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART)) | |||
| return FALSE; | |||
| return false; | |||
| ret = nouveau_bo_map(buf->bo, 0, nv->client); | |||
| if (ret) | |||
| return FALSE; | |||
| return false; | |||
| memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| @@ -870,7 +870,7 @@ nouveau_scratch_runout_release(struct nouveau_context *nv) | |||
| /* Allocate an extra bo if we can't fit everything we need simultaneously. | |||
| * (Could happen for very large user arrays.) | |||
| */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_scratch_runout(struct nouveau_context *nv, unsigned size) | |||
| { | |||
| int ret; | |||
| @@ -904,7 +904,7 @@ nouveau_scratch_runout(struct nouveau_context *nv, unsigned size) | |||
| /* Continue to next scratch buffer, if available (no wrapping, large enough). | |||
| * Allocate it if it has not yet been created. | |||
| */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_scratch_next(struct nouveau_context *nv, unsigned size) | |||
| { | |||
| struct nouveau_bo *bo; | |||
| @@ -912,14 +912,14 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size) | |||
| const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS; | |||
| if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap)) | |||
| return FALSE; | |||
| return false; | |||
| nv->scratch.id = i; | |||
| bo = nv->scratch.bo[i]; | |||
| if (!bo) { | |||
| ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size); | |||
| if (ret) | |||
| return FALSE; | |||
| return false; | |||
| nv->scratch.bo[i] = bo; | |||
| } | |||
| nv->scratch.current = bo; | |||
| @@ -932,10 +932,10 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size) | |||
| return !ret; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size) | |||
| { | |||
| boolean ret; | |||
| bool ret; | |||
| ret = nouveau_scratch_next(nv, min_size); | |||
| if (!ret) | |||
| @@ -58,7 +58,7 @@ nouveau_copy_buffer(struct nouveau_context *, | |||
| struct nv04_resource *dst, unsigned dst_pos, | |||
| struct nv04_resource *src, unsigned src_pos, unsigned size); | |||
| boolean | |||
| bool | |||
| nouveau_buffer_migrate(struct nouveau_context *, | |||
| struct nv04_resource *, unsigned domain); | |||
| @@ -79,7 +79,7 @@ nv04_resource(struct pipe_resource *resource) | |||
| } | |||
| /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nouveau_resource_mapped_by_gpu(struct pipe_resource *resource) | |||
| { | |||
| return nv04_resource(resource)->domain != 0; | |||
| @@ -93,7 +93,7 @@ struct pipe_resource * | |||
| nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr, | |||
| unsigned bytes, unsigned usage); | |||
| boolean | |||
| bool | |||
| nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *, | |||
| unsigned base, unsigned size); | |||
| @@ -13,7 +13,7 @@ struct nouveau_context { | |||
| struct nouveau_client *client; | |||
| struct nouveau_pushbuf *pushbuf; | |||
| boolean vbo_dirty; | |||
| bool vbo_dirty; | |||
| void (*copy_data)(struct nouveau_context *, | |||
| struct nouveau_bo *dst, unsigned, unsigned, | |||
| @@ -104,7 +104,7 @@ nouveau_context_update_frame_stats(struct nouveau_context *nv) | |||
| nv->stats.buf_cache_count = 0; | |||
| nv->stats.buf_cache_frame |= 1; | |||
| if ((nv->stats.buf_cache_frame & 0xf) == 0xf) | |||
| nv->screen->hint_buf_keep_sysmem_copy = TRUE; | |||
| nv->screen->hint_buf_keep_sysmem_copy = true; | |||
| } | |||
| } | |||
| @@ -28,13 +28,13 @@ | |||
| #include <sched.h> | |||
| #endif | |||
| boolean | |||
| bool | |||
| nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence, | |||
| boolean emit) | |||
| bool emit) | |||
| { | |||
| *fence = CALLOC_STRUCT(nouveau_fence); | |||
| if (!*fence) | |||
| return FALSE; | |||
| return false; | |||
| (*fence)->screen = screen; | |||
| (*fence)->ref = 1; | |||
| @@ -43,7 +43,7 @@ nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence, | |||
| if (emit) | |||
| nouveau_fence_emit(*fence); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -58,7 +58,7 @@ nouveau_fence_trigger_work(struct nouveau_fence *fence) | |||
| } | |||
| } | |||
| boolean | |||
| bool | |||
| nouveau_fence_work(struct nouveau_fence *fence, | |||
| void (*func)(void *), void *data) | |||
| { | |||
| @@ -66,16 +66,16 @@ nouveau_fence_work(struct nouveau_fence *fence, | |||
| if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) { | |||
| func(data); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| work = CALLOC_STRUCT(nouveau_fence_work); | |||
| if (!work) | |||
| return FALSE; | |||
| return false; | |||
| work->func = func; | |||
| work->data = data; | |||
| LIST_ADD(&work->list, &fence->work); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -132,7 +132,7 @@ nouveau_fence_del(struct nouveau_fence *fence) | |||
| } | |||
| void | |||
| nouveau_fence_update(struct nouveau_screen *screen, boolean flushed) | |||
| nouveau_fence_update(struct nouveau_screen *screen, bool flushed) | |||
| { | |||
| struct nouveau_fence *fence; | |||
| struct nouveau_fence *next = NULL; | |||
| @@ -167,21 +167,21 @@ nouveau_fence_update(struct nouveau_screen *screen, boolean flushed) | |||
| #define NOUVEAU_FENCE_MAX_SPINS (1 << 31) | |||
| boolean | |||
| bool | |||
| nouveau_fence_signalled(struct nouveau_fence *fence) | |||
| { | |||
| struct nouveau_screen *screen = fence->screen; | |||
| if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) | |||
| return TRUE; | |||
| return true; | |||
| if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED) | |||
| nouveau_fence_update(screen, FALSE); | |||
| nouveau_fence_update(screen, false); | |||
| return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED; | |||
| } | |||
| boolean | |||
| bool | |||
| nouveau_fence_wait(struct nouveau_fence *fence) | |||
| { | |||
| struct nouveau_screen *screen = fence->screen; | |||
| @@ -195,16 +195,16 @@ nouveau_fence_wait(struct nouveau_fence *fence) | |||
| if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED) | |||
| if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel)) | |||
| return FALSE; | |||
| return false; | |||
| if (fence == screen->fence.current) | |||
| nouveau_fence_next(screen); | |||
| do { | |||
| nouveau_fence_update(screen, FALSE); | |||
| nouveau_fence_update(screen, false); | |||
| if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) | |||
| return TRUE; | |||
| return true; | |||
| if (!spins) | |||
| NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1); | |||
| spins++; | |||
| @@ -218,7 +218,7 @@ nouveau_fence_wait(struct nouveau_fence *fence) | |||
| fence->sequence, | |||
| screen->fence.sequence_ack, screen->fence.sequence); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| void | |||
| @@ -229,5 +229,5 @@ nouveau_fence_next(struct nouveau_screen *screen) | |||
| nouveau_fence_ref(NULL, &screen->fence.current); | |||
| nouveau_fence_new(screen, &screen->fence.current, FALSE); | |||
| nouveau_fence_new(screen, &screen->fence.current, false); | |||
| } | |||
| @@ -29,13 +29,13 @@ struct nouveau_fence { | |||
| void nouveau_fence_emit(struct nouveau_fence *); | |||
| void nouveau_fence_del(struct nouveau_fence *); | |||
| boolean nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **, | |||
| boolean emit); | |||
| boolean nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *); | |||
| void nouveau_fence_update(struct nouveau_screen *, boolean flushed); | |||
| void nouveau_fence_next(struct nouveau_screen *); | |||
| boolean nouveau_fence_wait(struct nouveau_fence *); | |||
| boolean nouveau_fence_signalled(struct nouveau_fence *); | |||
| bool nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **, | |||
| bool emit); | |||
| bool nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *); | |||
| void nouveau_fence_update(struct nouveau_screen *, bool flushed); | |||
| void nouveau_fence_next(struct nouveau_screen *); | |||
| bool nouveau_fence_wait(struct nouveau_fence *); | |||
| bool nouveau_fence_signalled(struct nouveau_fence *); | |||
| static INLINE void | |||
| nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref) | |||
| @@ -111,7 +111,7 @@ nouveau_screen_bo_from_handle(struct pipe_screen *pscreen, | |||
| } | |||
| boolean | |||
| bool | |||
| nouveau_screen_bo_get_handle(struct pipe_screen *pscreen, | |||
| struct nouveau_bo *bo, | |||
| unsigned stride, | |||
| @@ -123,11 +123,11 @@ nouveau_screen_bo_get_handle(struct pipe_screen *pscreen, | |||
| return nouveau_bo_name_get(bo, &whandle->handle) == 0; | |||
| } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) { | |||
| whandle->handle = bo->handle; | |||
| return TRUE; | |||
| return true; | |||
| } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) { | |||
| return nouveau_bo_set_prime(bo, (int *)&whandle->handle) == 0; | |||
| } else { | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| } | |||
| @@ -49,7 +49,7 @@ struct nouveau_screen { | |||
| int64_t cpu_gpu_time_delta; | |||
| boolean hint_buf_keep_sysmem_copy; | |||
| bool hint_buf_keep_sysmem_copy; | |||
| unsigned vram_domain; | |||
| @@ -118,9 +118,9 @@ nouveau_screen(struct pipe_screen *pscreen) | |||
| return (struct nouveau_screen *)pscreen; | |||
| } | |||
| boolean nouveau_drm_screen_unref(struct nouveau_screen *screen); | |||
| bool nouveau_drm_screen_unref(struct nouveau_screen *screen); | |||
| boolean | |||
| bool | |||
| nouveau_screen_bo_get_handle(struct pipe_screen *pscreen, | |||
| struct nouveau_bo *bo, | |||
| unsigned stride, | |||
| @@ -296,16 +296,16 @@ nouveau_vpe_mb_mv_header(struct nouveau_decoder *dec, | |||
| case PIPE_MPEG12_MO_TYPE_DUAL_PRIME: { | |||
| base = NV17_MPEG_CMD_CHROMA_MV_HEADER_COUNT_2; | |||
| if (forward) { | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, FALSE, | |||
| x, y, mb->PMV[0][0], dec->past, TRUE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, TRUE, | |||
| x, y2, mb->PMV[0][0], dec->past, FALSE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, false, | |||
| x, y, mb->PMV[0][0], dec->past, true); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, true, | |||
| x, y2, mb->PMV[0][0], dec->past, false); | |||
| } | |||
| if (backward && forward) { | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, TRUE, | |||
| x, y, mb->PMV[1][0], dec->future, TRUE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, FALSE, | |||
| x, y2, mb->PMV[1][1], dec->future, FALSE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, true, | |||
| x, y, mb->PMV[1][0], dec->future, true); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, false, | |||
| x, y2, mb->PMV[1][1], dec->future, false); | |||
| } else assert(!backward); | |||
| break; | |||
| } | |||
| @@ -320,13 +320,13 @@ nouveau_vpe_mb_mv_header(struct nouveau_decoder *dec, | |||
| if (frame) | |||
| base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_TYPE_FRAME; | |||
| if (forward) | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, | |||
| dec->picture_structure != PIPE_MPEG12_PICTURE_STRUCTURE_FIELD_TOP, | |||
| x, y, mb->PMV[0][0], dec->past, TRUE); | |||
| x, y, mb->PMV[0][0], dec->past, true); | |||
| if (backward && forward) | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, FALSE, | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, false, | |||
| dec->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FIELD_TOP, | |||
| x, y, mb->PMV[0][1], dec->future, TRUE); | |||
| x, y, mb->PMV[0][1], dec->future, true); | |||
| else assert(!backward); | |||
| break; | |||
| } | |||
| @@ -341,11 +341,11 @@ mv1: | |||
| base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_TYPE_FRAME; | |||
| /* frame 16x16 */ | |||
| if (forward) | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, FALSE, | |||
| x, y, mb->PMV[0][0], dec->past, TRUE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, false, | |||
| x, y, mb->PMV[0][0], dec->past, true); | |||
| if (backward) | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, FALSE, | |||
| x, y, mb->PMV[0][1], dec->future, TRUE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, false, | |||
| x, y, mb->PMV[0][1], dec->future, true); | |||
| return; | |||
| mv2: | |||
| @@ -353,20 +353,20 @@ mv2: | |||
| if (!frame) | |||
| base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_MV_SPLIT_HALF_MB; | |||
| if (forward) { | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, | |||
| mb->motion_vertical_field_select & PIPE_MPEG12_FS_FIRST_FORWARD, | |||
| x, y, mb->PMV[0][0], dec->past, TRUE); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, | |||
| x, y, mb->PMV[0][0], dec->past, true); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, true, | |||
| mb->motion_vertical_field_select & PIPE_MPEG12_FS_SECOND_FORWARD, | |||
| x, y2, mb->PMV[1][0], dec->past, FALSE); | |||
| x, y2, mb->PMV[1][0], dec->past, false); | |||
| } | |||
| if (backward) { | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, | |||
| mb->motion_vertical_field_select & PIPE_MPEG12_FS_FIRST_BACKWARD, | |||
| x, y, mb->PMV[0][1], dec->future, TRUE); | |||
| x, y, mb->PMV[0][1], dec->future, true); | |||
| nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, | |||
| mb->motion_vertical_field_select & PIPE_MPEG12_FS_SECOND_BACKWARD, | |||
| x, y2, mb->PMV[1][1], dec->future, FALSE); | |||
| x, y2, mb->PMV[1][1], dec->future, false); | |||
| } | |||
| } | |||
| @@ -438,14 +438,14 @@ nouveau_decoder_decode_macroblock(struct pipe_video_codec *decoder, | |||
| mb = (const struct pipe_mpeg12_macroblock *)pipe_mb; | |||
| for (i = 0; i < num_macroblocks; ++i, mb++) { | |||
| if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) { | |||
| nouveau_vpe_mb_dct_header(dec, mb, TRUE); | |||
| nouveau_vpe_mb_dct_header(dec, mb, FALSE); | |||
| nouveau_vpe_mb_dct_header(dec, mb, true); | |||
| nouveau_vpe_mb_dct_header(dec, mb, false); | |||
| } else { | |||
| nouveau_vpe_mb_mv_header(dec, mb, TRUE); | |||
| nouveau_vpe_mb_dct_header(dec, mb, TRUE); | |||
| nouveau_vpe_mb_mv_header(dec, mb, true); | |||
| nouveau_vpe_mb_dct_header(dec, mb, true); | |||
| nouveau_vpe_mb_mv_header(dec, mb, FALSE); | |||
| nouveau_vpe_mb_dct_header(dec, mb, FALSE); | |||
| nouveau_vpe_mb_mv_header(dec, mb, false); | |||
| nouveau_vpe_mb_dct_header(dec, mb, false); | |||
| } | |||
| if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) | |||
| nouveau_vpe_mb_dct_blocks(dec, mb); | |||
| @@ -21,12 +21,12 @@ PUSH_AVAIL(struct nouveau_pushbuf *push) | |||
| return push->end - push->cur; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| PUSH_SPACE(struct nouveau_pushbuf *push, uint32_t size) | |||
| { | |||
| if (PUSH_AVAIL(push) < size) | |||
| return nouveau_pushbuf_space(push, size, 0, 0) == 0; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static INLINE void | |||
| @@ -58,7 +58,7 @@ nv30_clear(struct pipe_context *pipe, unsigned buffers, | |||
| struct pipe_framebuffer_state *fb = &nv30->framebuffer; | |||
| uint32_t colr = 0, zeta = 0, mode = 0; | |||
| if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, TRUE)) | |||
| if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true)) | |||
| return; | |||
| if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) { | |||
| @@ -45,7 +45,7 @@ nv30_context_kick_notify(struct nouveau_pushbuf *push) | |||
| screen = &nv30->screen->base; | |||
| nouveau_fence_next(screen); | |||
| nouveau_fence_update(screen, TRUE); | |||
| nouveau_fence_update(screen, true); | |||
| if (push->bufctx) { | |||
| struct nouveau_bufref *bref; | |||
| @@ -239,7 +239,7 @@ nv30_context_create(struct pipe_screen *pscreen, void *priv) | |||
| nv30->config.aniso = NV40_3D_TEX_WRAP_ANISO_MIP_FILTER_OPTIMIZATION_OFF; | |||
| if (debug_get_bool_option("NV30_SWTNL", FALSE)) | |||
| if (debug_get_bool_option("NV30_SWTNL", false)) | |||
| nv30->draw_flags |= NV30_NEW_SWTNL; | |||
| nv30->sample_mask = 0xffff; | |||
| @@ -52,7 +52,7 @@ struct nv30_context { | |||
| unsigned scissor_off; | |||
| unsigned num_vtxelts; | |||
| int index_bias; | |||
| boolean prim_restart; | |||
| bool prim_restart; | |||
| struct nv30_fragprog *fragprog; | |||
| } state; | |||
| @@ -115,14 +115,14 @@ struct nv30_context { | |||
| uint32_t vbo_user; | |||
| unsigned vbo_min_index; | |||
| unsigned vbo_max_index; | |||
| boolean vbo_push_hint; | |||
| bool vbo_push_hint; | |||
| struct nouveau_heap *blit_vp; | |||
| struct pipe_resource *blit_fp; | |||
| struct pipe_query *render_cond_query; | |||
| unsigned render_cond_mode; | |||
| boolean render_cond_cond; | |||
| bool render_cond_cond; | |||
| }; | |||
| static INLINE struct nv30_context * | |||
| @@ -204,8 +204,8 @@ nv30_draw_init(struct pipe_context *pipe); | |||
| void | |||
| nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info); | |||
| boolean | |||
| nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl); | |||
| bool | |||
| nv30_state_validate(struct nv30_context *nv30, uint32_t mask, bool hwtnl); | |||
| void | |||
| nv30_state_release(struct nv30_context *nv30); | |||
| @@ -79,12 +79,12 @@ nv30_render_allocate_vertices(struct vbuf_render *render, | |||
| PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STREAM, | |||
| render->max_vertex_buffer_bytes); | |||
| if (!r->buffer) | |||
| return FALSE; | |||
| return false; | |||
| r->offset = 0; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void * | |||
| @@ -134,7 +134,7 @@ nv30_render_draw_elements(struct vbuf_render *render, | |||
| NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1); | |||
| } | |||
| if (!nv30_state_validate(nv30, ~0, FALSE)) | |||
| if (!nv30_state_validate(nv30, ~0, false)) | |||
| return; | |||
| BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1); | |||
| @@ -179,7 +179,7 @@ nv30_render_draw_arrays(struct vbuf_render *render, unsigned start, uint nr) | |||
| NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1); | |||
| } | |||
| if (!nv30_state_validate(nv30, ~0, FALSE)) | |||
| if (!nv30_state_validate(nv30, ~0, false)) | |||
| return; | |||
| BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1); | |||
| @@ -221,7 +221,7 @@ static const struct { | |||
| [TGSI_SEMANTIC_TEXCOORD] = { EMIT_4F, INTERP_PERSPECTIVE, 8, 7, 0x00004000 }, | |||
| }; | |||
| static boolean | |||
| static bool | |||
| vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx) | |||
| { | |||
| struct nv30_screen *screen = r->nv30->screen; | |||
| @@ -245,7 +245,7 @@ vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx) | |||
| } | |||
| if (emit == EMIT_OMIT) | |||
| return FALSE; | |||
| return false; | |||
| draw_emit_vertex_attr(vinfo, emit, vroute[sem].interp, attrib); | |||
| format = draw_translate_vinfo_format(emit); | |||
| @@ -272,10 +272,10 @@ vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx) | |||
| assert(sem == TGSI_SEMANTIC_TEXCOORD); | |||
| *idx = 0x00001000 << (result - 8); | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv30_render_validate(struct nv30_context *nv30) | |||
| { | |||
| struct nv30_render *r = nv30_render(nv30->draw->render); | |||
| @@ -300,7 +300,7 @@ nv30_render_validate(struct nv30_context *nv30) | |||
| } | |||
| if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog)) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| } | |||
| @@ -370,7 +370,7 @@ nv30_render_validate(struct nv30_context *nv30) | |||
| } | |||
| vinfo->size /= 4; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -519,6 +519,6 @@ nv30_draw_init(struct pipe_context *pipe) | |||
| draw_set_rasterize_stage(draw, stage); | |||
| draw_wide_line_threshold(draw, 10000000.f); | |||
| draw_wide_point_threshold(draw, 10000000.f); | |||
| draw_wide_point_sprites(draw, TRUE); | |||
| draw_wide_point_sprites(draw, true); | |||
| nv30->draw = draw; | |||
| } | |||
| @@ -68,7 +68,7 @@ nv30_fragprog_validate(struct nv30_context *nv30) | |||
| struct nouveau_pushbuf *push = nv30->base.pushbuf; | |||
| struct nouveau_object *eng3d = nv30->screen->eng3d; | |||
| struct nv30_fragprog *fp = nv30->fragprog.program; | |||
| boolean upload = FALSE; | |||
| bool upload = false; | |||
| int i; | |||
| if (!fp->translated) { | |||
| @@ -76,7 +76,7 @@ nv30_fragprog_validate(struct nv30_context *nv30) | |||
| if (!fp->translated) | |||
| return; | |||
| upload = TRUE; | |||
| upload = true; | |||
| } | |||
| /* update constants, also needs to be done on every fp switch as we | |||
| @@ -93,7 +93,7 @@ nv30_fragprog_validate(struct nv30_context *nv30) | |||
| if (!memcmp(&fp->insn[off], &cbuf[idx], 4 * 4)) | |||
| continue; | |||
| memcpy(&fp->insn[off], &cbuf[idx], 4 * 4); | |||
| upload = TRUE; | |||
| upload = true; | |||
| } | |||
| } | |||
| @@ -54,7 +54,7 @@ nv30_miptree_get_handle(struct pipe_screen *pscreen, | |||
| unsigned stride; | |||
| if (!mt || !mt->base.bo) | |||
| return FALSE; | |||
| return false; | |||
| stride = mt->level[0].pitch; | |||
| @@ -372,7 +372,7 @@ nv30_miptree_create(struct pipe_screen *pscreen, | |||
| } | |||
| if (!mt->uniform_pitch) | |||
| mt->swizzled = TRUE; | |||
| mt->swizzled = true; | |||
| size = 0; | |||
| for (l = 0; l <= pt->last_level; l++) { | |||
| @@ -47,7 +47,7 @@ struct push_context { | |||
| struct translate *translate; | |||
| boolean primitive_restart; | |||
| bool primitive_restart; | |||
| uint32_t prim; | |||
| uint32_t restart_index; | |||
| }; | |||
| @@ -199,7 +199,7 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info) | |||
| { | |||
| struct push_context ctx; | |||
| unsigned i, index_size; | |||
| boolean apply_bias = info->indexed && info->index_bias; | |||
| bool apply_bias = info->indexed && info->index_bias; | |||
| ctx.push = nv30->base.pushbuf; | |||
| ctx.translate = nv30->vertex->translate; | |||
| @@ -241,7 +241,7 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info) | |||
| } else { | |||
| ctx.idxbuf = NULL; | |||
| index_size = 0; | |||
| ctx.primitive_restart = FALSE; | |||
| ctx.primitive_restart = false; | |||
| ctx.restart_index = 0; | |||
| } | |||
| @@ -208,7 +208,7 @@ nv30_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| if (ntfy1) { | |||
| while (ntfy1[3] & 0xff000000) { | |||
| if (!wait) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| switch (q->type) { | |||
| @@ -228,7 +228,7 @@ nv30_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| } | |||
| *res64 = q->result; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -42,12 +42,12 @@ nv30_memory_barrier(struct pipe_context *pipe, unsigned flags) | |||
| if (!nv30->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| } | |||
| if (nv30->idxbuf.buffer && | |||
| nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| } | |||
| } | |||
| @@ -32,7 +32,7 @@ struct nv30_miptree { | |||
| struct nv30_miptree_level level[13]; | |||
| uint32_t uniform_pitch; | |||
| uint32_t layer_size; | |||
| boolean swizzled; | |||
| bool swizzled; | |||
| unsigned ms_mode; | |||
| unsigned ms_x:1; | |||
| unsigned ms_y:1; | |||
| @@ -315,12 +315,12 @@ nv30_screen_is_format_supported(struct pipe_screen *pscreen, | |||
| unsigned bindings) | |||
| { | |||
| if (sample_count > 4) | |||
| return FALSE; | |||
| return false; | |||
| if (!(0x00000017 & (1 << sample_count))) | |||
| return FALSE; | |||
| return false; | |||
| if (!util_format_is_supported(format, bindings)) { | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| /* transfers & shared are always supported */ | |||
| @@ -658,6 +658,6 @@ nv30_screen_create(struct nouveau_device *dev) | |||
| nouveau_pushbuf_kick(push, push->channel); | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE); | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, false); | |||
| return pscreen; | |||
| } | |||
| @@ -80,7 +80,7 @@ struct nv30_vertprog { | |||
| struct tgsi_shader_info info; | |||
| struct draw_vertex_shader *draw; | |||
| boolean translated; | |||
| bool translated; | |||
| unsigned enabled_ucps; | |||
| uint16_t texcoord[10]; | |||
| @@ -109,7 +109,7 @@ struct nv30_fragprog { | |||
| struct tgsi_shader_info info; | |||
| struct draw_fragment_shader *draw; | |||
| boolean translated; | |||
| bool translated; | |||
| uint32_t *insn; | |||
| unsigned insn_len; | |||
| @@ -453,8 +453,8 @@ nv30_state_context_switch(struct nv30_context *nv30) | |||
| nv30->base.pushbuf->user_priv = &nv30->bufctx; | |||
| } | |||
| boolean | |||
| nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl) | |||
| bool | |||
| nv30_state_validate(struct nv30_context *nv30, uint32_t mask, bool hwtnl) | |||
| { | |||
| struct nouveau_screen *screen = &nv30->screen->base; | |||
| struct nouveau_pushbuf *push = nv30->base.pushbuf; | |||
| @@ -494,7 +494,7 @@ nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl) | |||
| nouveau_pushbuf_bufctx(push, bctx); | |||
| if (nouveau_pushbuf_validate(push)) { | |||
| nouveau_pushbuf_bufctx(push, NULL); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| /*XXX*/ | |||
| @@ -528,7 +528,7 @@ nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl) | |||
| } | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -41,30 +41,30 @@ | |||
| * of different ways. | |||
| */ | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv30_transfer_scaled(struct nv30_rect *src, struct nv30_rect *dst) | |||
| { | |||
| if (src->x1 - src->x0 != dst->x1 - dst->x0) | |||
| return TRUE; | |||
| return true; | |||
| if (src->y1 - src->y0 != dst->y1 - dst->y0) | |||
| return TRUE; | |||
| return FALSE; | |||
| return true; | |||
| return false; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv30_transfer_blit(XFER_ARGS) | |||
| { | |||
| if (nv30->screen->eng3d->oclass < NV40_3D_CLASS) | |||
| return FALSE; | |||
| return false; | |||
| if (dst->offset & 63 || dst->pitch & 63 || dst->d > 1) | |||
| return FALSE; | |||
| return false; | |||
| if (dst->w < 2 || dst->h < 2) | |||
| return FALSE; | |||
| return false; | |||
| if (dst->cpp > 4 || (dst->cpp == 1 && !dst->pitch)) | |||
| return FALSE; | |||
| return false; | |||
| if (src->cpp > 4) | |||
| return FALSE; | |||
| return TRUE; | |||
| return false; | |||
| return true; | |||
| } | |||
| static INLINE struct nouveau_heap * | |||
| @@ -368,29 +368,29 @@ nv30_transfer_rect_blit(XFER_ARGS) | |||
| PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP); | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv30_transfer_sifm(XFER_ARGS) | |||
| { | |||
| if (!src->pitch || (src->w | src->h) > 1024 || src->w < 2 || src->h < 2) | |||
| return FALSE; | |||
| return false; | |||
| if (src->d > 1 || dst->d > 1) | |||
| return FALSE; | |||
| return false; | |||
| if (dst->offset & 63) | |||
| return FALSE; | |||
| return false; | |||
| if (!dst->pitch) { | |||
| if ((dst->w | dst->h) > 2048 || dst->w < 2 || dst->h < 2) | |||
| return FALSE; | |||
| return false; | |||
| } else { | |||
| if (dst->domain != NOUVEAU_BO_VRAM) | |||
| return FALSE; | |||
| return false; | |||
| if (dst->pitch & 63) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -481,14 +481,14 @@ nv30_transfer_rect_sifm(XFER_ARGS) | |||
| * that name is still accurate on nv4x) error. | |||
| */ | |||
| static boolean | |||
| static bool | |||
| nv30_transfer_m2mf(XFER_ARGS) | |||
| { | |||
| if (!src->pitch || !dst->pitch) | |||
| return FALSE; | |||
| return false; | |||
| if (nv30_transfer_scaled(src, dst)) | |||
| return FALSE; | |||
| return TRUE; | |||
| return false; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -540,12 +540,12 @@ nv30_transfer_rect_m2mf(XFER_ARGS) | |||
| } | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv30_transfer_cpu(XFER_ARGS) | |||
| { | |||
| if (nv30_transfer_scaled(src, dst)) | |||
| return FALSE; | |||
| return TRUE; | |||
| return false; | |||
| return true; | |||
| } | |||
| static char * | |||
| @@ -653,7 +653,7 @@ nv30_transfer_rect(struct nv30_context *nv30, enum nv30_transfer_filter filter, | |||
| { | |||
| static const struct { | |||
| char *name; | |||
| boolean (*possible)(XFER_ARGS); | |||
| bool (*possible)(XFER_ARGS); | |||
| void (*execute)(XFER_ARGS); | |||
| } *method, methods[] = { | |||
| { "m2mf", nv30_transfer_m2mf, nv30_transfer_rect_m2mf }, | |||
| @@ -119,7 +119,7 @@ nv30_prevalidate_vbufs(struct nv30_context *nv30) | |||
| } else { | |||
| nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART); | |||
| } | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| } | |||
| } | |||
| } | |||
| @@ -160,7 +160,7 @@ nv30_update_user_vbufs(struct nv30_context *nv30) | |||
| NOUVEAU_BO_LOW | NOUVEAU_BO_RD, | |||
| 0, NV30_3D_VTXBUF_DMA1); | |||
| } | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| } | |||
| static INLINE void | |||
| @@ -224,7 +224,7 @@ nv30_vbo_validate(struct nv30_context *nv30) | |||
| for (i = 0; i < vertex->num_elements; i++) { | |||
| struct nv04_resource *res; | |||
| unsigned offset; | |||
| boolean user; | |||
| bool user; | |||
| ve = &vertex->pipe[i]; | |||
| vb = &nv30->vtxbuf[ve->vertex_buffer_index]; | |||
| @@ -262,7 +262,7 @@ nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements, | |||
| return NULL; | |||
| memcpy(so->pipe, elements, sizeof(*elements) * num_elements); | |||
| so->num_elements = num_elements; | |||
| so->need_conversion = FALSE; | |||
| so->need_conversion = false; | |||
| transkey.nr_elements = 0; | |||
| transkey.output_stride = 0; | |||
| @@ -285,7 +285,7 @@ nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements, | |||
| return NULL; | |||
| } | |||
| so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw; | |||
| so->need_conversion = TRUE; | |||
| so->need_conversion = true; | |||
| } | |||
| if (1) { | |||
| @@ -453,7 +453,7 @@ nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, | |||
| } | |||
| static void | |||
| nv30_draw_elements(struct nv30_context *nv30, boolean shorten, | |||
| nv30_draw_elements(struct nv30_context *nv30, bool shorten, | |||
| unsigned mode, unsigned start, unsigned count, | |||
| unsigned instance_count, int32_t index_bias) | |||
| { | |||
| @@ -563,7 +563,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS))) | |||
| nv30_update_user_vbufs(nv30); | |||
| nv30_state_validate(nv30, ~0, TRUE); | |||
| nv30_state_validate(nv30, ~0, true); | |||
| if (nv30->draw_flags) { | |||
| nv30_render_vbo(pipe, info); | |||
| return; | |||
| @@ -577,17 +577,17 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| if (!nv30->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| } | |||
| if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer && | |||
| nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nv30->base.vbo_dirty = TRUE; | |||
| nv30->base.vbo_dirty = true; | |||
| if (nv30->base.vbo_dirty) { | |||
| BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1); | |||
| PUSH_DATA (push, 0); | |||
| nv30->base.vbo_dirty = FALSE; | |||
| nv30->base.vbo_dirty = false; | |||
| } | |||
| if (!info->indexed) { | |||
| @@ -595,7 +595,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| info->mode, info->start, info->count, | |||
| info->instance_count); | |||
| } else { | |||
| boolean shorten = info->max_index <= 65535; | |||
| bool shorten = info->max_index <= 65535; | |||
| if (info->primitive_restart != nv30->state.prim_restart) { | |||
| if (info->primitive_restart) { | |||
| @@ -604,7 +604,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| PUSH_DATA (push, info->restart_index); | |||
| if (info->restart_index > 65535) | |||
| shorten = FALSE; | |||
| shorten = false; | |||
| } else { | |||
| BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1); | |||
| PUSH_DATA (push, 0); | |||
| @@ -616,7 +616,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| PUSH_DATA (push, info->restart_index); | |||
| if (info->restart_index > 65535) | |||
| shorten = FALSE; | |||
| shorten = false; | |||
| } | |||
| nv30_draw_elements(nv30, shorten, | |||
| @@ -48,7 +48,7 @@ nv30_vertprog_destroy(struct nv30_vertprog *vp) | |||
| vp->consts = NULL; | |||
| vp->nr_consts = 0; | |||
| vp->translated = FALSE; | |||
| vp->translated = false; | |||
| } | |||
| void | |||
| @@ -58,8 +58,8 @@ nv30_vertprog_validate(struct nv30_context *nv30) | |||
| struct nouveau_object *eng3d = nv30->screen->eng3d; | |||
| struct nv30_vertprog *vp = nv30->vertprog.program; | |||
| struct nv30_fragprog *fp = nv30->fragprog.program; | |||
| boolean upload_code = FALSE; | |||
| boolean upload_data = FALSE; | |||
| bool upload_code = false; | |||
| bool upload_data = false; | |||
| unsigned i; | |||
| if (nv30->dirty & NV30_NEW_FRAGPROG) { | |||
| @@ -125,7 +125,7 @@ nv30_vertprog_validate(struct nv30_context *nv30) | |||
| } | |||
| } | |||
| upload_code = TRUE; | |||
| upload_code = true; | |||
| } | |||
| if (vp->nr_consts && !vp->data) { | |||
| @@ -166,8 +166,8 @@ nv30_vertprog_validate(struct nv30_context *nv30) | |||
| } | |||
| } | |||
| upload_code = TRUE; | |||
| upload_data = TRUE; | |||
| upload_code = true; | |||
| upload_data = true; | |||
| } | |||
| if (vp->nr_consts) { | |||
| @@ -442,7 +442,7 @@ tgsi_mask(uint tgsi) | |||
| return mask; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc, | |||
| const struct tgsi_full_instruction *finst) | |||
| { | |||
| @@ -455,7 +455,7 @@ nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc, | |||
| int i; | |||
| if (finst->Instruction.Opcode == TGSI_OPCODE_END) | |||
| return TRUE; | |||
| return true; | |||
| for (i = 0; i < finst->Instruction.NumSrcRegs; i++) { | |||
| const struct tgsi_full_src_register *fsrc; | |||
| @@ -525,7 +525,7 @@ nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc, | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("bad src file\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| } | |||
| @@ -868,12 +868,12 @@ nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc, | |||
| default: | |||
| NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| out: | |||
| release_temps(fpc); | |||
| return TRUE; | |||
| return true; | |||
| nv3x_cflow: | |||
| { | |||
| static int warned = 0; | |||
| @@ -887,7 +887,7 @@ nv3x_cflow: | |||
| goto out; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_fragprog_parse_decl_input(struct nvfx_fpc *fpc, | |||
| const struct tgsi_full_declaration *fdec) | |||
| { | |||
| @@ -917,17 +917,17 @@ nvfx_fragprog_parse_decl_input(struct nvfx_fpc *fpc, | |||
| case TGSI_SEMANTIC_GENERIC: | |||
| case TGSI_SEMANTIC_PCOORD: | |||
| /* will be assigned to remaining TC slots later */ | |||
| return TRUE; | |||
| return true; | |||
| default: | |||
| assert(0); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_fragprog_assign_generic(struct nvfx_fpc *fpc, | |||
| const struct tgsi_full_declaration *fdec) | |||
| { | |||
| @@ -954,16 +954,16 @@ nvfx_fragprog_assign_generic(struct nvfx_fpc *fpc, | |||
| } | |||
| hw = NVFX_FP_OP_INPUT_SRC_TC(hw); | |||
| fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| } | |||
| return FALSE; | |||
| return false; | |||
| default: | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_fragprog_parse_decl_output(struct nvfx_fpc *fpc, | |||
| const struct tgsi_full_declaration *fdec) | |||
| { | |||
| @@ -984,20 +984,20 @@ nvfx_fragprog_parse_decl_output(struct nvfx_fpc *fpc, | |||
| } | |||
| if(hw > ((fpc->is_nv4x) ? 4 : 2)) { | |||
| NOUVEAU_ERR("bad rcol index\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("bad output semantic\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw); | |||
| fpc->r_temps |= (1ULL << hw); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_fragprog_prepare(struct nvfx_fpc *fpc) | |||
| { | |||
| struct tgsi_parse_context p; | |||
| @@ -1081,17 +1081,17 @@ nvfx_fragprog_prepare(struct nvfx_fpc *fpc) | |||
| fpc->r_temps_discard = 0ULL; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| out_err: | |||
| FREE(fpc->r_temp); | |||
| fpc->r_temp = NULL; | |||
| tgsi_parse_free(&p); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE) | |||
| DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", false) | |||
| void | |||
| _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp) | |||
| @@ -1100,7 +1100,7 @@ _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp) | |||
| struct nvfx_fpc *fpc = NULL; | |||
| struct util_dynarray insns; | |||
| fp->translated = FALSE; | |||
| fp->translated = false; | |||
| fp->point_sprite_control = 0; | |||
| fp->vp_or = 0; | |||
| @@ -1182,7 +1182,7 @@ _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp) | |||
| debug_printf("\n"); | |||
| } | |||
| fp->translated = TRUE; | |||
| fp->translated = true; | |||
| out: | |||
| tgsi_parse_free(&parse); | |||
| @@ -449,7 +449,7 @@ struct nvfx_insn | |||
| }; | |||
| static INLINE struct nvfx_insn | |||
| nvfx_insn(boolean sat, unsigned op, int unit, struct nvfx_reg dst, unsigned mask, struct nvfx_src s0, struct nvfx_src s1, struct nvfx_src s2) | |||
| nvfx_insn(bool sat, unsigned op, int unit, struct nvfx_reg dst, unsigned mask, struct nvfx_src s0, struct nvfx_src s1, struct nvfx_src s2) | |||
| { | |||
| struct nvfx_insn insn = { | |||
| .op = op, | |||
| @@ -529,7 +529,7 @@ struct nv30_vertprog; | |||
| void | |||
| _nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp); | |||
| boolean | |||
| bool | |||
| _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp); | |||
| #endif | |||
| @@ -455,7 +455,7 @@ tgsi_mask(uint tgsi) | |||
| return mask; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc, | |||
| unsigned idx, const struct tgsi_full_instruction *finst) | |||
| { | |||
| @@ -466,7 +466,7 @@ nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc, | |||
| struct nvfx_insn insn; | |||
| struct nvfx_relocation reloc; | |||
| struct nvfx_loop_entry loop; | |||
| boolean sat = FALSE; | |||
| bool sat = false; | |||
| int mask; | |||
| int ai = -1, ci = -1, ii = -1; | |||
| int i; | |||
| @@ -524,25 +524,25 @@ nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc, | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("bad src file\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| } | |||
| for (i = 0; i < finst->Instruction.NumSrcRegs; i++) { | |||
| if(src[i].reg.type < 0) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS && | |||
| finst->Instruction.Opcode != TGSI_OPCODE_ARL) | |||
| return FALSE; | |||
| return false; | |||
| final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]); | |||
| mask = tgsi_mask(finst->Dst[0].Register.WriteMask); | |||
| if(finst->Instruction.Saturate) { | |||
| assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL); | |||
| if (vpc->is_nv4x) | |||
| sat = TRUE; | |||
| sat = true; | |||
| else | |||
| if(dst.type != NVFXSR_TEMP) | |||
| dst = temp(vpc); | |||
| @@ -793,7 +793,7 @@ nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc, | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if(finst->Instruction.Saturate && !vpc->is_nv4x) { | |||
| @@ -804,10 +804,10 @@ nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc, | |||
| } | |||
| release_temps(vpc); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc, | |||
| const struct tgsi_full_declaration *fdec) | |||
| { | |||
| @@ -825,7 +825,7 @@ nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc, | |||
| vpc->r_result[idx] = temp(vpc); | |||
| vpc->r_temps_discard = 0; | |||
| vpc->cvtx_idx = idx; | |||
| return TRUE; | |||
| return true; | |||
| case TGSI_SEMANTIC_COLOR: | |||
| if (fdec->Semantic.Index == 0) { | |||
| hw = NVFX_VP(INST_DEST_COL0); | |||
| @@ -834,7 +834,7 @@ nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc, | |||
| hw = NVFX_VP(INST_DEST_COL1); | |||
| } else { | |||
| NOUVEAU_ERR("bad colour semantic index\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| break; | |||
| case TGSI_SEMANTIC_BCOLOR: | |||
| @@ -845,7 +845,7 @@ nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc, | |||
| hw = NVFX_VP(INST_DEST_BFC1); | |||
| } else { | |||
| NOUVEAU_ERR("bad bcolour semantic index\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| break; | |||
| case TGSI_SEMANTIC_FOG: | |||
| @@ -868,22 +868,22 @@ nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc, | |||
| if (i == num_texcoords) { | |||
| vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| break; | |||
| case TGSI_SEMANTIC_EDGEFLAG: | |||
| vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0); | |||
| return TRUE; | |||
| return true; | |||
| default: | |||
| NOUVEAU_ERR("bad output semantic\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvfx_vertprog_prepare(struct nvfx_vpc *vpc) | |||
| { | |||
| struct tgsi_parse_context p; | |||
| @@ -924,7 +924,7 @@ nvfx_vertprog_prepare(struct nvfx_vpc *vpc) | |||
| break; | |||
| case TGSI_FILE_OUTPUT: | |||
| if (!nvfx_vertprog_parse_decl_output(vpc, fdec)) | |||
| return FALSE; | |||
| return false; | |||
| break; | |||
| default: | |||
| break; | |||
| @@ -961,12 +961,12 @@ nvfx_vertprog_prepare(struct nvfx_vpc *vpc) | |||
| } | |||
| vpc->r_temps_discard = 0; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE) | |||
| DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false) | |||
| boolean | |||
| bool | |||
| _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp) | |||
| { | |||
| struct tgsi_parse_context parse; | |||
| @@ -975,13 +975,13 @@ _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp) | |||
| struct util_dynarray insns; | |||
| int i, ucps; | |||
| vp->translated = FALSE; | |||
| vp->translated = false; | |||
| vp->nr_insns = 0; | |||
| vp->nr_consts = 0; | |||
| vpc = CALLOC_STRUCT(nvfx_vpc); | |||
| if (!vpc) | |||
| return FALSE; | |||
| return false; | |||
| vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0; | |||
| vpc->vp = vp; | |||
| vpc->pipe = vp->pipe; | |||
| @@ -990,7 +990,7 @@ _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp) | |||
| if (!nvfx_vertprog_prepare(vpc)) { | |||
| FREE(vpc); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| /* Redirect post-transform vertex position to a temp if user clip | |||
| @@ -1108,7 +1108,7 @@ _nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp) | |||
| debug_printf("\n"); | |||
| } | |||
| vp->translated = TRUE; | |||
| vp->translated = true; | |||
| out: | |||
| tgsi_parse_free(&parse); | |||
| @@ -191,8 +191,8 @@ nv50_blit_eng2d_get_mask(const struct pipe_blit_info *info) | |||
| # define nv50_format_table nvc0_format_table | |||
| #endif | |||
| /* return TRUE for formats that can be converted among each other by NVC0_2D */ | |||
| static INLINE boolean | |||
| /* return true for formats that can be converted among each other by NVC0_2D */ | |||
| static INLINE bool | |||
| nv50_2d_dst_format_faithful(enum pipe_format format) | |||
| { | |||
| const uint64_t mask = | |||
| @@ -201,7 +201,7 @@ nv50_2d_dst_format_faithful(enum pipe_format format) | |||
| uint8_t id = nv50_format_table[format].rt; | |||
| return (id >= 0xc0) && (mask & (1ULL << (id - 0xc0))); | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv50_2d_src_format_faithful(enum pipe_format format) | |||
| { | |||
| const uint64_t mask = | |||
| @@ -211,7 +211,7 @@ nv50_2d_src_format_faithful(enum pipe_format format) | |||
| return (id >= 0xc0) && (mask & (1ULL << (id - 0xc0))); | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv50_2d_format_supported(enum pipe_format format) | |||
| { | |||
| uint8_t id = nv50_format_table[format].rt; | |||
| @@ -219,7 +219,7 @@ nv50_2d_format_supported(enum pipe_format format) | |||
| (NV50_ENG2D_SUPPORTED_FORMATS & (1ULL << (id - 0xc0))); | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv50_2d_dst_format_ops_supported(enum pipe_format format) | |||
| { | |||
| uint8_t id = nv50_format_table[format].rt; | |||
| @@ -64,12 +64,12 @@ nv50_memory_barrier(struct pipe_context *pipe, unsigned flags) | |||
| if (!nv50->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| } | |||
| if (nv50->idxbuf.buffer && | |||
| nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| for (s = 0; s < 3 && !nv50->cb_dirty; ++s) { | |||
| uint32_t valid = nv50->constbuf_valid[s]; | |||
| @@ -87,7 +87,7 @@ nv50_memory_barrier(struct pipe_context *pipe, unsigned flags) | |||
| continue; | |||
| if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nv50->cb_dirty = TRUE; | |||
| nv50->cb_dirty = true; | |||
| } | |||
| } | |||
| } | |||
| @@ -100,9 +100,9 @@ nv50_default_kick_notify(struct nouveau_pushbuf *push) | |||
| if (screen) { | |||
| nouveau_fence_next(&screen->base); | |||
| nouveau_fence_update(&screen->base, TRUE); | |||
| nouveau_fence_update(&screen->base, true); | |||
| if (screen->cur_ctx) | |||
| screen->cur_ctx->state.flushed = TRUE; | |||
| screen->cur_ctx->state.flushed = true; | |||
| } | |||
| } | |||
| @@ -310,7 +310,7 @@ nv50_create(struct pipe_screen *pscreen, void *priv) | |||
| nv50->base.invalidate_resource_storage = nv50_invalidate_resource_storage; | |||
| if (screen->base.device->chipset < 0x84 || | |||
| debug_get_bool_option("NOUVEAU_PMPEG", FALSE)) { | |||
| debug_get_bool_option("NOUVEAU_PMPEG", false)) { | |||
| /* PMPEG */ | |||
| nouveau_context_init_vdec(&nv50->base); | |||
| } else if (screen->base.device->chipset < 0x98 || | |||
| @@ -351,7 +351,7 @@ out_err: | |||
| } | |||
| void | |||
| nv50_bufctx_fence(struct nouveau_bufctx *bufctx, boolean on_flush) | |||
| nv50_bufctx_fence(struct nouveau_bufctx *bufctx, bool on_flush) | |||
| { | |||
| struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending; | |||
| struct nouveau_list *it; | |||
| @@ -91,7 +91,7 @@ | |||
| struct nv50_blitctx; | |||
| boolean nv50_blitctx_create(struct nv50_context *); | |||
| bool nv50_blitctx_create(struct nv50_context *); | |||
| struct nv50_context { | |||
| struct nouveau_context base; | |||
| @@ -102,7 +102,7 @@ struct nv50_context { | |||
| struct nouveau_bufctx *bufctx; | |||
| uint32_t dirty; | |||
| boolean cb_dirty; | |||
| bool cb_dirty; | |||
| struct nv50_graph_state state; | |||
| @@ -152,12 +152,12 @@ struct nv50_context { | |||
| unsigned sample_mask; | |||
| unsigned min_samples; | |||
| boolean vbo_push_hint; | |||
| bool vbo_push_hint; | |||
| uint32_t rt_array_mode; | |||
| struct pipe_query *cond_query; | |||
| boolean cond_cond; /* inverted rendering condition */ | |||
| bool cond_cond; /* inverted rendering condition */ | |||
| uint cond_mode; | |||
| uint32_t cond_condmode; /* the calculated condition */ | |||
| @@ -188,7 +188,7 @@ nv50_context_shader_stage(unsigned pipe) | |||
| /* nv50_context.c */ | |||
| struct pipe_context *nv50_create(struct pipe_screen *, void *); | |||
| void nv50_bufctx_fence(struct nouveau_bufctx *, boolean on_flush); | |||
| void nv50_bufctx_fence(struct nouveau_bufctx *, bool on_flush); | |||
| void nv50_default_kick_notify(struct nouveau_pushbuf *); | |||
| @@ -202,7 +202,7 @@ void nv50_query_pushbuf_submit(struct nouveau_pushbuf *, | |||
| void nv84_query_fifo_wait(struct nouveau_pushbuf *, struct pipe_query *); | |||
| void nva0_so_target_save_offset(struct pipe_context *, | |||
| struct pipe_stream_output_target *, | |||
| unsigned index, boolean seralize); | |||
| unsigned index, bool seralize); | |||
| #define NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET (PIPE_QUERY_TYPES + 0) | |||
| @@ -221,8 +221,8 @@ extern void nv50_init_state_functions(struct nv50_context *); | |||
| /* nv50_state_validate.c */ | |||
| /* @words: check for space before emitting relocs */ | |||
| extern boolean nv50_state_validate(struct nv50_context *, uint32_t state_mask, | |||
| unsigned space_words); | |||
| extern bool nv50_state_validate(struct nv50_context *, uint32_t state_mask, | |||
| unsigned space_words); | |||
| /* nv50_surface.c */ | |||
| extern void nv50_clear(struct pipe_context *, unsigned buffers, | |||
| @@ -30,7 +30,7 @@ | |||
| uint32_t | |||
| nv50_tex_choose_tile_dims_helper(unsigned nx, unsigned ny, unsigned nz, | |||
| boolean is_3d) | |||
| bool is_3d) | |||
| { | |||
| uint32_t tile_mode = 0x000; | |||
| @@ -59,13 +59,13 @@ nv50_tex_choose_tile_dims_helper(unsigned nx, unsigned ny, unsigned nz, | |||
| } | |||
| static uint32_t | |||
| nv50_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, boolean is_3d) | |||
| nv50_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, bool is_3d) | |||
| { | |||
| return nv50_tex_choose_tile_dims_helper(nx, ny * 2, nz, is_3d); | |||
| } | |||
| static uint32_t | |||
| nv50_mt_choose_storage_type(struct nv50_miptree *mt, boolean compressed) | |||
| nv50_mt_choose_storage_type(struct nv50_miptree *mt, bool compressed) | |||
| { | |||
| const unsigned ms = util_logbase2(mt->base.base.nr_samples); | |||
| uint32_t tile_flags; | |||
| @@ -184,7 +184,7 @@ nv50_miptree_get_handle(struct pipe_screen *pscreen, | |||
| unsigned stride; | |||
| if (!mt || !mt->base.bo) | |||
| return FALSE; | |||
| return false; | |||
| stride = mt->level[0].pitch; | |||
| @@ -204,7 +204,7 @@ const struct u_resource_vtbl nv50_miptree_vtbl = | |||
| u_default_transfer_inline_write /* transfer_inline_write */ | |||
| }; | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nv50_miptree_init_ms_mode(struct nv50_miptree *mt) | |||
| { | |||
| switch (mt->base.base.nr_samples) { | |||
| @@ -228,12 +228,12 @@ nv50_miptree_init_ms_mode(struct nv50_miptree *mt) | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("invalid nr_samples: %u\n", mt->base.base.nr_samples); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| boolean | |||
| bool | |||
| nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align) | |||
| { | |||
| struct pipe_resource *pt = &mt->base.base; | |||
| @@ -241,12 +241,12 @@ nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align) | |||
| unsigned h = pt->height0; | |||
| if (util_format_is_depth_or_stencil(pt->format)) | |||
| return FALSE; | |||
| return false; | |||
| if ((pt->last_level > 0) || (pt->depth0 > 1) || (pt->array_size > 1)) | |||
| return FALSE; | |||
| return false; | |||
| if (mt->ms_x | mt->ms_y) | |||
| return FALSE; | |||
| return false; | |||
| mt->level[0].pitch = align(pt->width0 * blocksize, pitch_align); | |||
| @@ -256,7 +256,7 @@ nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align) | |||
| mt->total_size = mt->level[0].pitch * h; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -335,7 +335,7 @@ nv50_miptree_create(struct pipe_screen *pscreen, | |||
| struct nouveau_device *dev = nouveau_screen(pscreen)->device; | |||
| struct nv50_miptree *mt = CALLOC_STRUCT(nv50_miptree); | |||
| struct pipe_resource *pt = &mt->base.base; | |||
| boolean compressed = dev->drm_version >= 0x01000101; | |||
| bool compressed = dev->drm_version >= 0x01000101; | |||
| int ret; | |||
| union nouveau_bo_config bo_config; | |||
| uint32_t bo_flags; | |||
| @@ -104,7 +104,7 @@ nv50_vertprog_assign_slots(struct nv50_ir_prog_info *info) | |||
| prog->vp.bfc[info->out[i].si] = i; | |||
| break; | |||
| case TGSI_SEMANTIC_LAYER: | |||
| prog->gp.has_layer = TRUE; | |||
| prog->gp.has_layer = true; | |||
| prog->gp.layerid = n; | |||
| break; | |||
| case TGSI_SEMANTIC_VIEWPORT_INDEX: | |||
| @@ -316,7 +316,7 @@ nv50_program_create_strmout_state(const struct nv50_ir_prog_info *info, | |||
| return so; | |||
| } | |||
| boolean | |||
| bool | |||
| nv50_program_translate(struct nv50_program *prog, uint16_t chipset) | |||
| { | |||
| struct nv50_ir_prog_info *info; | |||
| @@ -325,7 +325,7 @@ nv50_program_translate(struct nv50_program *prog, uint16_t chipset) | |||
| info = CALLOC_STRUCT(nv50_ir_prog_info); | |||
| if (!info) | |||
| return FALSE; | |||
| return false; | |||
| info->type = prog->type; | |||
| info->target = chipset; | |||
| @@ -410,7 +410,7 @@ out: | |||
| return !ret; | |||
| } | |||
| boolean | |||
| bool | |||
| nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog) | |||
| { | |||
| struct nouveau_heap *heap; | |||
| @@ -423,7 +423,7 @@ nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog) | |||
| case PIPE_SHADER_FRAGMENT: heap = nv50->screen->gp_code_heap; break; | |||
| default: | |||
| assert(!"invalid program type"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| ret = nouveau_heap_alloc(heap, size, prog, &prog->mem); | |||
| @@ -440,7 +440,7 @@ nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog) | |||
| ret = nouveau_heap_alloc(heap, size, prog, &prog->mem); | |||
| if (ret) { | |||
| NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| } | |||
| prog->code_base = prog->mem->start; | |||
| @@ -448,10 +448,10 @@ nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog) | |||
| ret = nv50_tls_realloc(nv50->screen, prog->tls_space); | |||
| if (ret < 0) { | |||
| nouveau_heap_free(&prog->mem); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (ret > 0) | |||
| nv50->state.new_tls_space = TRUE; | |||
| nv50->state.new_tls_space = true; | |||
| if (prog->fixups) | |||
| nv50_ir_relocate_code(prog->fixups, prog->code, prog->code_base, 0, 0); | |||
| @@ -463,7 +463,7 @@ nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog) | |||
| BEGIN_NV04(nv50->base.pushbuf, NV50_3D(CODE_CB_FLUSH), 1); | |||
| PUSH_DATA (nv50->base.pushbuf, 0); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -53,7 +53,7 @@ struct nv50_program { | |||
| struct pipe_shader_state pipe; | |||
| ubyte type; | |||
| boolean translated; | |||
| bool translated; | |||
| uint32_t *code; | |||
| unsigned code_size; | |||
| @@ -104,8 +104,8 @@ struct nv50_program { | |||
| struct nv50_stream_output_state *so; | |||
| }; | |||
| boolean nv50_program_translate(struct nv50_program *, uint16_t chipset); | |||
| boolean nv50_program_upload_code(struct nv50_context *, struct nv50_program *); | |||
| bool nv50_program_translate(struct nv50_program *, uint16_t chipset); | |||
| bool nv50_program_upload_code(struct nv50_context *, struct nv50_program *); | |||
| void nv50_program_destroy(struct nv50_context *, struct nv50_program *); | |||
| #endif /* __NV50_PROG_H__ */ | |||
| @@ -23,7 +23,7 @@ struct push_context { | |||
| struct translate *translate; | |||
| boolean primitive_restart; | |||
| bool primitive_restart; | |||
| uint32_t prim; | |||
| uint32_t restart_index; | |||
| uint32_t instance_id; | |||
| @@ -212,7 +212,7 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info) | |||
| unsigned i, index_size; | |||
| unsigned inst_count = info->instance_count; | |||
| unsigned vert_count = info->count; | |||
| boolean apply_bias = info->indexed && info->index_bias; | |||
| bool apply_bias = info->indexed && info->index_bias; | |||
| ctx.push = nv50->base.pushbuf; | |||
| ctx.translate = nv50->vertex->translate; | |||
| @@ -258,12 +258,12 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info) | |||
| NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n"); | |||
| return; | |||
| } | |||
| pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count); | |||
| pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count); | |||
| vert_count /= targ->stride; | |||
| } | |||
| ctx.idxbuf = NULL; | |||
| index_size = 0; | |||
| ctx.primitive_restart = FALSE; | |||
| ctx.primitive_restart = false; | |||
| ctx.restart_index = 0; | |||
| } | |||
| @@ -48,7 +48,7 @@ struct nv50_query { | |||
| uint32_t base; | |||
| uint32_t offset; /* base + i * 32 */ | |||
| uint8_t state; | |||
| boolean is64bit; | |||
| bool is64bit; | |||
| int nesting; /* only used for occlusion queries */ | |||
| struct nouveau_mm_allocation *mm; | |||
| struct nouveau_fence *fence; | |||
| @@ -62,7 +62,7 @@ nv50_query(struct pipe_query *pipe) | |||
| return (struct nv50_query *)pipe; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) | |||
| { | |||
| struct nv50_screen *screen = nv50->screen; | |||
| @@ -81,17 +81,17 @@ nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) | |||
| if (size) { | |||
| q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base); | |||
| if (!q->bo) | |||
| return FALSE; | |||
| return false; | |||
| q->offset = q->base; | |||
| ret = nouveau_bo_map(q->bo, 0, screen->base.client); | |||
| if (ret) { | |||
| nv50_query_allocate(nv50, q, 0); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base); | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -154,8 +154,8 @@ nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq) | |||
| struct nv50_query *q = nv50_query(pq); | |||
| /* For occlusion queries we have to change the storage, because a previous | |||
| * query might set the initial render conition to FALSE even *after* we re- | |||
| * initialized it to TRUE. | |||
| * query might set the initial render conition to false even *after* we re- | |||
| * initialized it to true. | |||
| */ | |||
| if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) { | |||
| q->offset += 32; | |||
| @@ -167,7 +167,7 @@ nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq) | |||
| * query ? | |||
| */ | |||
| q->data[0] = q->sequence; /* initialize sequence */ | |||
| q->data[1] = 1; /* initial render condition = TRUE */ | |||
| q->data[1] = 1; /* initial render condition = true */ | |||
| q->data[4] = q->sequence + 1; /* for comparison COND_MODE */ | |||
| q->data[5] = 0; | |||
| } | |||
| @@ -269,7 +269,7 @@ nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq) | |||
| nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5)); | |||
| break; | |||
| case PIPE_QUERY_TIMESTAMP_DISJOINT: | |||
| /* This query is not issued on GPU because disjoint is forced to FALSE */ | |||
| /* This query is not issued on GPU because disjoint is forced to false */ | |||
| q->state = NV50_QUERY_STATE_READY; | |||
| break; | |||
| default: | |||
| @@ -301,7 +301,7 @@ nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| struct nv50_query *q = nv50_query(pq); | |||
| uint64_t *res64 = (uint64_t *)result; | |||
| uint32_t *res32 = (uint32_t *)result; | |||
| boolean *res8 = (boolean *)result; | |||
| uint8_t *res8 = (uint8_t *)result; | |||
| uint64_t *data64 = (uint64_t *)q->data; | |||
| int i; | |||
| @@ -315,16 +315,16 @@ nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| q->state = NV50_QUERY_STATE_FLUSHED; | |||
| PUSH_KICK(nv50->base.pushbuf); | |||
| } | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client)) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| q->state = NV50_QUERY_STATE_READY; | |||
| switch (q->type) { | |||
| case PIPE_QUERY_GPU_FINISHED: | |||
| res8[0] = TRUE; | |||
| res8[0] = true; | |||
| break; | |||
| case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */ | |||
| res64[0] = q->data[1] - q->data[5]; | |||
| @@ -346,7 +346,7 @@ nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| break; | |||
| case PIPE_QUERY_TIMESTAMP_DISJOINT: | |||
| res64[0] = 1000000000; | |||
| res8[8] = FALSE; | |||
| res8[8] = false; | |||
| break; | |||
| case PIPE_QUERY_TIME_ELAPSED: | |||
| res64[0] = data64[1] - data64[3]; | |||
| @@ -355,10 +355,10 @@ nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| res32[0] = q->data[1]; | |||
| break; | |||
| default: | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -385,7 +385,7 @@ nv50_render_condition(struct pipe_context *pipe, | |||
| struct nouveau_pushbuf *push = nv50->base.pushbuf; | |||
| struct nv50_query *q; | |||
| uint32_t cond; | |||
| boolean wait = | |||
| bool wait = | |||
| mode != PIPE_RENDER_COND_NO_WAIT && | |||
| mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT; | |||
| @@ -399,7 +399,7 @@ nv50_render_condition(struct pipe_context *pipe, | |||
| case PIPE_QUERY_SO_OVERFLOW_PREDICATE: | |||
| cond = condition ? NV50_3D_COND_MODE_EQUAL : | |||
| NV50_3D_COND_MODE_NOT_EQUAL; | |||
| wait = TRUE; | |||
| wait = true; | |||
| break; | |||
| case PIPE_QUERY_OCCLUSION_COUNTER: | |||
| case PIPE_QUERY_OCCLUSION_PREDICATE: | |||
| @@ -468,7 +468,7 @@ nv50_query_pushbuf_submit(struct nouveau_pushbuf *push, | |||
| void | |||
| nva0_so_target_save_offset(struct pipe_context *pipe, | |||
| struct pipe_stream_output_target *ptarg, | |||
| unsigned index, boolean serialize) | |||
| unsigned index, bool serialize) | |||
| { | |||
| struct nv50_so_target *targ = nv50_so_target(ptarg); | |||
| @@ -35,7 +35,7 @@ nv50_screen_init_resource_functions(struct pipe_screen *pscreen); | |||
| uint32_t | |||
| nv50_tex_choose_tile_dims_helper(unsigned nx, unsigned ny, unsigned nz, | |||
| boolean is_3d); | |||
| bool is_3d); | |||
| struct nv50_miptree_level { | |||
| uint32_t offset; | |||
| @@ -50,7 +50,7 @@ struct nv50_miptree { | |||
| struct nv50_miptree_level level[NV50_MAX_TEXTURE_LEVELS]; | |||
| uint32_t total_size; | |||
| uint32_t layer_stride; | |||
| boolean layout_3d; /* TRUE if layer count varies with mip level */ | |||
| bool layout_3d; /* true if layer count varies with mip level */ | |||
| uint8_t ms_x; /* log2 of number of samples in x/y dimension */ | |||
| uint8_t ms_y; | |||
| uint8_t ms_mode; | |||
| @@ -70,7 +70,7 @@ nv50_miptree(struct pipe_resource *pt) | |||
| /* Internal functions: | |||
| */ | |||
| boolean | |||
| bool | |||
| nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align); | |||
| struct pipe_resource * | |||
| @@ -51,19 +51,19 @@ nv50_screen_is_format_supported(struct pipe_screen *pscreen, | |||
| unsigned bindings) | |||
| { | |||
| if (sample_count > 8) | |||
| return FALSE; | |||
| return false; | |||
| if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */ | |||
| return FALSE; | |||
| return false; | |||
| if (sample_count == 8 && util_format_get_blocksizebits(format) >= 128) | |||
| return FALSE; | |||
| return false; | |||
| if (!util_format_is_supported(format, bindings)) | |||
| return FALSE; | |||
| return false; | |||
| switch (format) { | |||
| case PIPE_FORMAT_Z16_UNORM: | |||
| if (nv50_screen(pscreen)->tesla->oclass < NVA0_3D_CLASS) | |||
| return FALSE; | |||
| return false; | |||
| break; | |||
| default: | |||
| break; | |||
| @@ -455,7 +455,7 @@ nv50_screen_init_hwctx(struct nv50_screen *screen) | |||
| BEGIN_NV04(push, NV50_3D(UNK1400_LANES), 1); | |||
| PUSH_DATA (push, 0xf); | |||
| if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", TRUE)) { | |||
| if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) { | |||
| BEGIN_NV04(push, NV50_3D(WATCHDOG_TIMER), 1); | |||
| PUSH_DATA (push, 0x18); | |||
| } | |||
| @@ -735,7 +735,7 @@ nv50_screen_create(struct nouveau_device *dev) | |||
| nv50_screen_init_resource_functions(pscreen); | |||
| if (screen->base.device->chipset < 0x84 || | |||
| debug_get_bool_option("NOUVEAU_PMPEG", FALSE)) { | |||
| debug_get_bool_option("NOUVEAU_PMPEG", false)) { | |||
| /* PMPEG */ | |||
| nouveau_screen_init_vdec(&screen->base); | |||
| } else if (screen->base.device->chipset < 0x98 || | |||
| @@ -891,7 +891,7 @@ nv50_screen_create(struct nouveau_device *dev) | |||
| nv50_screen_init_hwctx(screen); | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE); | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, false); | |||
| return pscreen; | |||
| @@ -32,14 +32,14 @@ struct nv50_graph_state { | |||
| uint32_t semantic_color; | |||
| uint32_t semantic_psize; | |||
| int32_t index_bias; | |||
| boolean uniform_buffer_bound[3]; | |||
| boolean prim_restart; | |||
| boolean point_sprite; | |||
| boolean rt_serialize; | |||
| boolean flushed; | |||
| boolean rasterizer_discard; | |||
| bool uniform_buffer_bound[3]; | |||
| bool prim_restart; | |||
| bool point_sprite; | |||
| bool rt_serialize; | |||
| bool flushed; | |||
| bool rasterizer_discard; | |||
| uint8_t tls_required; | |||
| boolean new_tls_space; | |||
| bool new_tls_space; | |||
| uint8_t num_vtxbufs; | |||
| uint8_t num_vtxelts; | |||
| uint8_t num_textures[3]; | |||
| @@ -103,7 +103,7 @@ nv50_screen(struct pipe_screen *screen) | |||
| return (struct nv50_screen *)screen; | |||
| } | |||
| boolean nv50_blitter_create(struct nv50_screen *); | |||
| bool nv50_blitter_create(struct nv50_screen *); | |||
| void nv50_blitter_destroy(struct nv50_screen *); | |||
| int nv50_screen_tic_alloc(struct nv50_screen *, void *); | |||
| @@ -60,7 +60,7 @@ nv50_constbufs_validate(struct nv50_context *nv50) | |||
| continue; | |||
| } | |||
| if (!nv50->state.uniform_buffer_bound[s]) { | |||
| nv50->state.uniform_buffer_bound[s] = TRUE; | |||
| nv50->state.uniform_buffer_bound[s] = true; | |||
| BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1); | |||
| PUSH_DATA (push, (b << 12) | (i << 8) | p | 1); | |||
| } | |||
| @@ -104,23 +104,23 @@ nv50_constbufs_validate(struct nv50_context *nv50) | |||
| PUSH_DATA (push, (i << 8) | p | 0); | |||
| } | |||
| if (i == 0) | |||
| nv50->state.uniform_buffer_bound[s] = FALSE; | |||
| nv50->state.uniform_buffer_bound[s] = false; | |||
| } | |||
| } | |||
| } | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog) | |||
| { | |||
| if (!prog->translated) { | |||
| prog->translated = nv50_program_translate( | |||
| prog, nv50->screen->base.device->chipset); | |||
| if (!prog->translated) | |||
| return FALSE; | |||
| return false; | |||
| } else | |||
| if (prog->mem) | |||
| return TRUE; | |||
| return true; | |||
| return nv50_program_upload_code(nv50, prog); | |||
| } | |||
| @@ -136,7 +136,7 @@ nv50_program_update_context_state(struct nv50_context *nv50, | |||
| nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS); | |||
| if (!nv50->state.tls_required || nv50->state.new_tls_space) | |||
| BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo); | |||
| nv50->state.new_tls_space = FALSE; | |||
| nv50->state.new_tls_space = false; | |||
| nv50->state.tls_required |= 1 << stage; | |||
| } else { | |||
| if (nv50->state.tls_required == (1 << stage)) | |||
| @@ -243,11 +243,11 @@ nv50_sprite_coords_validate(struct nv50_context *nv50) | |||
| for (i = 0; i < 8; ++i) | |||
| PUSH_DATA(push, 0); | |||
| nv50->state.point_sprite = FALSE; | |||
| nv50->state.point_sprite = false; | |||
| } | |||
| return; | |||
| } else { | |||
| nv50->state.point_sprite = TRUE; | |||
| nv50->state.point_sprite = true; | |||
| } | |||
| memset(pntc, 0, sizeof(pntc)); | |||
| @@ -646,7 +646,7 @@ nv50_stream_output_validate(struct nv50_context *nv50) | |||
| nv50_query_pushbuf_submit(push, targ->pq, 0x4); | |||
| } else { | |||
| PUSH_DATA(push, 0); | |||
| targ->clean = FALSE; | |||
| targ->clean = false; | |||
| } | |||
| } else { | |||
| const unsigned limit = targ->pipe.buffer_size / | |||
| @@ -116,7 +116,7 @@ nv50_blend_state_create(struct pipe_context *pipe, | |||
| { | |||
| struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj); | |||
| int i; | |||
| boolean emit_common_func = cso->rt[0].blend_enable; | |||
| bool emit_common_func = cso->rt[0].blend_enable; | |||
| uint32_t ms; | |||
| if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) { | |||
| @@ -137,11 +137,11 @@ nv50_blend_state_create(struct pipe_context *pipe, | |||
| for (i = 0; i < 8; ++i) { | |||
| SB_DATA(so, cso->rt[i].blend_enable); | |||
| if (cso->rt[i].blend_enable) | |||
| emit_common_func = TRUE; | |||
| emit_common_func = true; | |||
| } | |||
| if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) { | |||
| emit_common_func = FALSE; | |||
| emit_common_func = false; | |||
| for (i = 0; i < 8; ++i) { | |||
| if (!cso->rt[i].blend_enable) | |||
| @@ -808,7 +808,7 @@ nv50_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index, | |||
| pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res); | |||
| nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE; | |||
| nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false; | |||
| if (nv50->constbuf[s][i].user) { | |||
| nv50->constbuf[s][i].u.data = cb->user_buffer; | |||
| nv50->constbuf[s][i].size = MIN2(cb->buffer_size, 0x10000); | |||
| @@ -1041,7 +1041,7 @@ nv50_so_target_create(struct pipe_context *pipe, | |||
| } else { | |||
| targ->pq = NULL; | |||
| } | |||
| targ->clean = TRUE; | |||
| targ->clean = true; | |||
| targ->pipe.buffer_size = size; | |||
| targ->pipe.buffer_offset = offset; | |||
| @@ -1075,32 +1075,32 @@ nv50_set_stream_output_targets(struct pipe_context *pipe, | |||
| { | |||
| struct nv50_context *nv50 = nv50_context(pipe); | |||
| unsigned i; | |||
| boolean serialize = TRUE; | |||
| const boolean can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS; | |||
| bool serialize = true; | |||
| const bool can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS; | |||
| assert(num_targets <= 4); | |||
| for (i = 0; i < num_targets; ++i) { | |||
| const boolean changed = nv50->so_target[i] != targets[i]; | |||
| const boolean append = (offsets[i] == (unsigned)-1); | |||
| const bool changed = nv50->so_target[i] != targets[i]; | |||
| const bool append = (offsets[i] == (unsigned)-1); | |||
| if (!changed && append) | |||
| continue; | |||
| nv50->so_targets_dirty |= 1 << i; | |||
| if (can_resume && changed && nv50->so_target[i]) { | |||
| nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize); | |||
| serialize = FALSE; | |||
| serialize = false; | |||
| } | |||
| if (targets[i] && !append) | |||
| nv50_so_target(targets[i])->clean = TRUE; | |||
| nv50_so_target(targets[i])->clean = true; | |||
| pipe_so_target_reference(&nv50->so_target[i], targets[i]); | |||
| } | |||
| for (; i < nv50->num_so_targets; ++i) { | |||
| if (can_resume && nv50->so_target[i]) { | |||
| nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize); | |||
| serialize = FALSE; | |||
| serialize = false; | |||
| } | |||
| pipe_so_target_reference(&nv50->so_target[i], NULL); | |||
| nv50->so_targets_dirty |= 1 << i; | |||
| @@ -82,7 +82,7 @@ nv50_validate_fb(struct nv50_context *nv50) | |||
| ms_mode = mt->ms_mode; | |||
| if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING) | |||
| nv50->state.rt_serialize = TRUE; | |||
| nv50->state.rt_serialize = true; | |||
| mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; | |||
| mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING; | |||
| @@ -111,7 +111,7 @@ nv50_validate_fb(struct nv50_context *nv50) | |||
| ms_mode = mt->ms_mode; | |||
| if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING) | |||
| nv50->state.rt_serialize = TRUE; | |||
| nv50->state.rt_serialize = true; | |||
| mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; | |||
| mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING; | |||
| @@ -486,7 +486,7 @@ static struct state_validate { | |||
| }; | |||
| #define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0])) | |||
| boolean | |||
| bool | |||
| nv50_state_validate(struct nv50_context *nv50, uint32_t mask, unsigned words) | |||
| { | |||
| uint32_t state_mask; | |||
| @@ -508,19 +508,19 @@ nv50_state_validate(struct nv50_context *nv50, uint32_t mask, unsigned words) | |||
| nv50->dirty &= ~state_mask; | |||
| if (nv50->state.rt_serialize) { | |||
| nv50->state.rt_serialize = FALSE; | |||
| nv50->state.rt_serialize = false; | |||
| BEGIN_NV04(nv50->base.pushbuf, SUBC_3D(NV50_GRAPH_SERIALIZE), 1); | |||
| PUSH_DATA (nv50->base.pushbuf, 0); | |||
| } | |||
| nv50_bufctx_fence(nv50->bufctx_3d, FALSE); | |||
| nv50_bufctx_fence(nv50->bufctx_3d, false); | |||
| } | |||
| nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx_3d); | |||
| ret = nouveau_pushbuf_validate(nv50->base.pushbuf); | |||
| if (unlikely(nv50->state.flushed)) { | |||
| nv50->state.flushed = FALSE; | |||
| nv50_bufctx_fence(nv50->bufctx_3d, TRUE); | |||
| nv50->state.flushed = false; | |||
| nv50_bufctx_fence(nv50->bufctx_3d, true); | |||
| } | |||
| return !ret; | |||
| } | |||
| @@ -41,7 +41,7 @@ struct nv50_constbuf { | |||
| } u; | |||
| uint32_t size; /* max 65536 */ | |||
| uint32_t offset; | |||
| boolean user; /* should only be TRUE if u.data is valid and non-NULL */ | |||
| bool user; /* should only be true if u.data is valid and non-NULL */ | |||
| }; | |||
| struct nv50_vertex_element { | |||
| @@ -56,7 +56,7 @@ struct nv50_vertex_stateobj { | |||
| unsigned num_elements; | |||
| uint32_t instance_elts; | |||
| uint32_t instance_bufs; | |||
| boolean need_conversion; | |||
| bool need_conversion; | |||
| unsigned vertex_size; | |||
| unsigned packet_vertex_limit; | |||
| struct nv50_vertex_element element[0]; | |||
| @@ -66,7 +66,7 @@ struct nv50_so_target { | |||
| struct pipe_stream_output_target pipe; | |||
| struct pipe_query *pq; | |||
| unsigned stride; | |||
| boolean clean; | |||
| bool clean; | |||
| }; | |||
| static INLINE struct nv50_so_target * | |||
| @@ -50,7 +50,7 @@ | |||
| #include "nv50/nv50_blit.h" | |||
| static INLINE uint8_t | |||
| nv50_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal) | |||
| nv50_2d_format(enum pipe_format format, bool dst, bool dst_src_equal) | |||
| { | |||
| uint8_t id = nv50_format_table[format].rt; | |||
| @@ -76,7 +76,7 @@ nv50_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal) | |||
| static int | |||
| nv50_2d_texture_set(struct nouveau_pushbuf *push, int dst, | |||
| struct nv50_miptree *mt, unsigned level, unsigned layer, | |||
| enum pipe_format pformat, boolean dst_src_pformat_equal) | |||
| enum pipe_format pformat, bool dst_src_pformat_equal) | |||
| { | |||
| struct nouveau_bo *bo = mt->base.bo; | |||
| uint32_t width, height, depth; | |||
| @@ -153,7 +153,7 @@ nv50_2d_texture_do_copy(struct nouveau_pushbuf *push, | |||
| const enum pipe_format dfmt = dst->base.base.format; | |||
| const enum pipe_format sfmt = src->base.base.format; | |||
| int ret; | |||
| boolean eqfmt = dfmt == sfmt; | |||
| bool eqfmt = dfmt == sfmt; | |||
| if (!PUSH_SPACE(push, 2 * 16 + 32)) | |||
| return PIPE_ERROR; | |||
| @@ -196,7 +196,7 @@ nv50_resource_copy_region(struct pipe_context *pipe, | |||
| { | |||
| struct nv50_context *nv50 = nv50_context(pipe); | |||
| int ret; | |||
| boolean m2mf; | |||
| bool m2mf; | |||
| unsigned dst_layer = dstz, src_layer = src_box->z; | |||
| if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { | |||
| @@ -658,7 +658,7 @@ nv50_blitter_make_vp(struct nv50_blitter *blit) | |||
| }; | |||
| blit->vp.type = PIPE_SHADER_VERTEX; | |||
| blit->vp.translated = TRUE; | |||
| blit->vp.translated = true; | |||
| blit->vp.code = (uint32_t *)code; /* const_cast */ | |||
| blit->vp.code_size = sizeof(code); | |||
| blit->vp.max_gpr = 4; | |||
| @@ -687,24 +687,24 @@ nv50_blitter_make_fp(struct pipe_context *pipe, | |||
| const unsigned target = nv50_blit_get_tgsi_texture_target(ptarg); | |||
| boolean tex_rgbaz = FALSE; | |||
| boolean tex_s = FALSE; | |||
| boolean cvt_un8 = FALSE; | |||
| bool tex_rgbaz = false; | |||
| bool tex_s = false; | |||
| bool cvt_un8 = false; | |||
| if (mode != NV50_BLIT_MODE_PASS && | |||
| mode != NV50_BLIT_MODE_Z24X8 && | |||
| mode != NV50_BLIT_MODE_X8Z24) | |||
| tex_s = TRUE; | |||
| tex_s = true; | |||
| if (mode != NV50_BLIT_MODE_X24S8 && | |||
| mode != NV50_BLIT_MODE_S8X24 && | |||
| mode != NV50_BLIT_MODE_XS) | |||
| tex_rgbaz = TRUE; | |||
| tex_rgbaz = true; | |||
| if (mode != NV50_BLIT_MODE_PASS && | |||
| mode != NV50_BLIT_MODE_ZS && | |||
| mode != NV50_BLIT_MODE_XS) | |||
| cvt_un8 = TRUE; | |||
| cvt_un8 = true; | |||
| ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT); | |||
| if (!ureg) | |||
| @@ -1271,7 +1271,7 @@ nv50_blit_eng2d(struct nv50_context *nv50, const struct pipe_blit_info *info) | |||
| int i; | |||
| uint32_t mode; | |||
| uint32_t mask = nv50_blit_eng2d_get_mask(info); | |||
| boolean b; | |||
| bool b; | |||
| mode = nv50_blit_get_filter(info) ? | |||
| NV50_2D_BLIT_CONTROL_FILTER_BILINEAR : | |||
| @@ -1410,7 +1410,7 @@ nv50_blit_eng2d(struct nv50_context *nv50, const struct pipe_blit_info *info) | |||
| PUSH_DATA (push, srcy >> 32); | |||
| } | |||
| } | |||
| nv50_bufctx_fence(nv50->bufctx, FALSE); | |||
| nv50_bufctx_fence(nv50->bufctx, false); | |||
| nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D); | |||
| @@ -1433,66 +1433,66 @@ nv50_blit(struct pipe_context *pipe, const struct pipe_blit_info *info) | |||
| { | |||
| struct nv50_context *nv50 = nv50_context(pipe); | |||
| struct nouveau_pushbuf *push = nv50->base.pushbuf; | |||
| boolean eng3d = FALSE; | |||
| bool eng3d = false; | |||
| if (util_format_is_depth_or_stencil(info->dst.resource->format)) { | |||
| if (!(info->mask & PIPE_MASK_ZS)) | |||
| return; | |||
| if (info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT || | |||
| info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| if (info->filter != PIPE_TEX_FILTER_NEAREST) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else { | |||
| if (!(info->mask & PIPE_MASK_RGBA)) | |||
| return; | |||
| if (info->mask != PIPE_MASK_RGBA) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } | |||
| if (nv50_miptree(info->src.resource)->layout_3d) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else | |||
| if (info->src.box.depth != info->dst.box.depth) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| debug_printf("blit: cannot filter array or cube textures in z direction"); | |||
| } | |||
| if (!eng3d && info->dst.format != info->src.format) { | |||
| if (!nv50_2d_dst_format_faithful(info->dst.format) || | |||
| !nv50_2d_src_format_faithful(info->src.format)) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else | |||
| if (!nv50_2d_src_format_faithful(info->src.format)) { | |||
| if (!util_format_is_luminance(info->src.format)) { | |||
| if (util_format_is_intensity(info->src.format)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| else | |||
| if (!nv50_2d_dst_format_ops_supported(info->dst.format)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| else | |||
| eng3d = !nv50_2d_format_supported(info->src.format); | |||
| } | |||
| } else | |||
| if (util_format_is_luminance_alpha(info->src.format)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } | |||
| if (info->src.resource->nr_samples == 8 && | |||
| info->dst.resource->nr_samples <= 1) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| /* FIXME: can't make this work with eng2d anymore */ | |||
| if ((info->src.resource->nr_samples | 1) != | |||
| (info->dst.resource->nr_samples | 1)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| /* FIXME: find correct src coordinate adjustments */ | |||
| if ((info->src.box.width != info->dst.box.width && | |||
| info->src.box.width != -info->dst.box.width) || | |||
| (info->src.box.height != info->dst.box.height && | |||
| info->src.box.height != -info->dst.box.height)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| if (nv50->screen->num_occlusion_queries_active) { | |||
| BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1); | |||
| @@ -1516,13 +1516,13 @@ nv50_flush_resource(struct pipe_context *ctx, | |||
| { | |||
| } | |||
| boolean | |||
| bool | |||
| nv50_blitter_create(struct nv50_screen *screen) | |||
| { | |||
| screen->blitter = CALLOC_STRUCT(nv50_blitter); | |||
| if (!screen->blitter) { | |||
| NOUVEAU_ERR("failed to allocate blitter struct\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| pipe_mutex_init(screen->blitter->mutex); | |||
| @@ -1530,7 +1530,7 @@ nv50_blitter_create(struct nv50_screen *screen) | |||
| nv50_blitter_make_vp(screen->blitter); | |||
| nv50_blitter_make_sampler(screen->blitter); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -1553,20 +1553,20 @@ nv50_blitter_destroy(struct nv50_screen *screen) | |||
| FREE(blitter); | |||
| } | |||
| boolean | |||
| bool | |||
| nv50_blitctx_create(struct nv50_context *nv50) | |||
| { | |||
| nv50->blit = CALLOC_STRUCT(nv50_blitctx); | |||
| if (!nv50->blit) { | |||
| NOUVEAU_ERR("failed to allocate blit context\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| nv50->blit->nv50 = nv50; | |||
| nv50->blit->rast.pipe.half_pixel_center = 1; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -32,7 +32,7 @@ | |||
| NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK) | |||
| static INLINE uint32_t | |||
| nv50_tic_swizzle(uint32_t tc, unsigned swz, boolean tex_int) | |||
| nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int) | |||
| { | |||
| switch (swz) { | |||
| case PIPE_SWIZZLE_RED: | |||
| @@ -79,7 +79,7 @@ nv50_create_texture_view(struct pipe_context *pipe, | |||
| uint32_t depth; | |||
| struct nv50_tic_entry *view; | |||
| struct nv50_miptree *mt = nv50_miptree(texture); | |||
| boolean tex_int; | |||
| bool tex_int; | |||
| view = MALLOC_STRUCT(nv50_tic_entry); | |||
| if (!view) | |||
| @@ -193,7 +193,7 @@ nv50_create_texture_view(struct pipe_context *pipe, | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("invalid texture target: %d\n", mt->base.base.target); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000; | |||
| @@ -221,13 +221,13 @@ nv50_create_texture_view(struct pipe_context *pipe, | |||
| return &view->pipe; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv50_validate_tic(struct nv50_context *nv50, int s) | |||
| { | |||
| struct nouveau_pushbuf *push = nv50->base.pushbuf; | |||
| struct nouveau_bo *txc = nv50->screen->txc; | |||
| unsigned i; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS); | |||
| for (i = 0; i < nv50->num_textures[s]; ++i) { | |||
| @@ -270,7 +270,7 @@ nv50_validate_tic(struct nv50_context *nv50, int s) | |||
| BEGIN_NI04(push, NV50_2D(SIFC_DATA), 8); | |||
| PUSH_DATAp(push, &tic->tic[0], 8); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } else | |||
| if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { | |||
| BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1); | |||
| @@ -316,7 +316,7 @@ nv50_validate_tic(struct nv50_context *nv50, int s) | |||
| void nv50_validate_textures(struct nv50_context *nv50) | |||
| { | |||
| boolean need_flush; | |||
| bool need_flush; | |||
| need_flush = nv50_validate_tic(nv50, 0); | |||
| need_flush |= nv50_validate_tic(nv50, 1); | |||
| @@ -328,12 +328,12 @@ void nv50_validate_textures(struct nv50_context *nv50) | |||
| } | |||
| } | |||
| static boolean | |||
| static bool | |||
| nv50_validate_tsc(struct nv50_context *nv50, int s) | |||
| { | |||
| struct nouveau_pushbuf *push = nv50->base.pushbuf; | |||
| unsigned i; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS); | |||
| for (i = 0; i < nv50->num_samplers[s]; ++i) { | |||
| @@ -350,7 +350,7 @@ nv50_validate_tsc(struct nv50_context *nv50, int s) | |||
| nv50_sifc_linear_u8(&nv50->base, nv50->screen->txc, | |||
| 65536 + tsc->id * 32, | |||
| NOUVEAU_BO_VRAM, 32, tsc->tsc); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } | |||
| nv50->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32); | |||
| @@ -368,7 +368,7 @@ nv50_validate_tsc(struct nv50_context *nv50, int s) | |||
| void nv50_validate_samplers(struct nv50_context *nv50) | |||
| { | |||
| boolean need_flush; | |||
| bool need_flush; | |||
| need_flush = nv50_validate_tsc(nv50, 0); | |||
| need_flush |= nv50_validate_tsc(nv50, 1); | |||
| @@ -58,7 +58,7 @@ nv50_vertex_state_create(struct pipe_context *pipe, | |||
| so->num_elements = num_elements; | |||
| so->instance_elts = 0; | |||
| so->instance_bufs = 0; | |||
| so->need_conversion = FALSE; | |||
| so->need_conversion = false; | |||
| memset(so->vb_access_size, 0, sizeof(so->vb_access_size)); | |||
| @@ -89,7 +89,7 @@ nv50_vertex_state_create(struct pipe_context *pipe, | |||
| return NULL; | |||
| } | |||
| so->element[i].state = nv50_format_table[fmt].vtx; | |||
| so->need_conversion = TRUE; | |||
| so->need_conversion = true; | |||
| } | |||
| so->element[i].state |= i; | |||
| @@ -229,7 +229,7 @@ nv50_upload_user_buffers(struct nv50_context *nv50, | |||
| BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, NOUVEAU_BO_GART | | |||
| NOUVEAU_BO_RD, bo); | |||
| } | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| } | |||
| static void | |||
| @@ -275,7 +275,7 @@ nv50_update_user_vbufs(struct nv50_context *nv50) | |||
| PUSH_DATAh(push, address[b] + ve->src_offset); | |||
| PUSH_DATA (push, address[b] + ve->src_offset); | |||
| } | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| } | |||
| static INLINE void | |||
| @@ -316,7 +316,7 @@ nv50_vertex_arrays_validate(struct nv50_context *nv50) | |||
| struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer); | |||
| if (buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { | |||
| buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING; | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| break; | |||
| } | |||
| } | |||
| @@ -590,7 +590,7 @@ nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, | |||
| } | |||
| static void | |||
| nv50_draw_elements(struct nv50_context *nv50, boolean shorten, | |||
| nv50_draw_elements(struct nv50_context *nv50, bool shorten, | |||
| unsigned mode, unsigned start, unsigned count, | |||
| unsigned instance_count, int32_t index_bias) | |||
| { | |||
| @@ -751,9 +751,9 @@ nv50_draw_vbo_kick_notify(struct nouveau_pushbuf *chan) | |||
| { | |||
| struct nv50_screen *screen = chan->user_priv; | |||
| nouveau_fence_update(&screen->base, TRUE); | |||
| nouveau_fence_update(&screen->base, true); | |||
| nv50_bufctx_fence(screen->cur_ctx->bufctx_3d, TRUE); | |||
| nv50_bufctx_fence(screen->cur_ctx->bufctx_3d, true); | |||
| } | |||
| void | |||
| @@ -806,7 +806,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| continue; | |||
| if (res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nv50->cb_dirty = TRUE; | |||
| nv50->cb_dirty = true; | |||
| } | |||
| } | |||
| @@ -814,7 +814,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| if (nv50->cb_dirty) { | |||
| BEGIN_NV04(push, NV50_3D(CODE_CB_FLUSH), 1); | |||
| PUSH_DATA (push, 0); | |||
| nv50->cb_dirty = FALSE; | |||
| nv50->cb_dirty = false; | |||
| } | |||
| if (nv50->vbo_fifo) { | |||
| @@ -835,21 +835,21 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| if (!nv50->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| } | |||
| if (!nv50->base.vbo_dirty && nv50->idxbuf.buffer && | |||
| nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nv50->base.vbo_dirty = TRUE; | |||
| nv50->base.vbo_dirty = true; | |||
| if (nv50->base.vbo_dirty) { | |||
| BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FLUSH), 1); | |||
| PUSH_DATA (push, 0); | |||
| nv50->base.vbo_dirty = FALSE; | |||
| nv50->base.vbo_dirty = false; | |||
| } | |||
| if (info->indexed) { | |||
| boolean shorten = info->max_index <= 65535; | |||
| bool shorten = info->max_index <= 65535; | |||
| if (info->primitive_restart != nv50->state.prim_restart) { | |||
| if (info->primitive_restart) { | |||
| @@ -858,7 +858,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| PUSH_DATA (push, info->restart_index); | |||
| if (info->restart_index > 65535) | |||
| shorten = FALSE; | |||
| shorten = false; | |||
| } else { | |||
| BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 1); | |||
| PUSH_DATA (push, 0); | |||
| @@ -870,7 +870,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| PUSH_DATA (push, info->restart_index); | |||
| if (info->restart_index > 65535) | |||
| shorten = FALSE; | |||
| shorten = false; | |||
| } | |||
| nv50_draw_elements(nv50, shorten, | |||
| @@ -121,51 +121,51 @@ nvc0_screen_compute_setup(struct nvc0_screen *screen, | |||
| return 0; | |||
| } | |||
| boolean | |||
| bool | |||
| nvc0_compute_validate_program(struct nvc0_context *nvc0) | |||
| { | |||
| struct nvc0_program *prog = nvc0->compprog; | |||
| if (prog->mem) | |||
| return TRUE; | |||
| return true; | |||
| if (!prog->translated) { | |||
| prog->translated = nvc0_program_translate( | |||
| prog, nvc0->screen->base.device->chipset); | |||
| if (!prog->translated) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (unlikely(!prog->code_size)) | |||
| return FALSE; | |||
| return false; | |||
| if (likely(prog->code_size)) { | |||
| if (nvc0_program_upload_code(nvc0, prog)) { | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| BEGIN_NVC0(push, NVC0_COMPUTE(FLUSH), 1); | |||
| PUSH_DATA (push, NVC0_COMPUTE_FLUSH_CODE); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| } | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvc0_compute_state_validate(struct nvc0_context *nvc0) | |||
| { | |||
| if (!nvc0_compute_validate_program(nvc0)) | |||
| return FALSE; | |||
| return false; | |||
| /* TODO: textures, samplers, surfaces, global memory buffers */ | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, FALSE); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false); | |||
| nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp); | |||
| if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf))) | |||
| return FALSE; | |||
| return false; | |||
| if (unlikely(nvc0->state.flushed)) | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, TRUE); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| @@ -4,7 +4,7 @@ | |||
| #include "nv50/nv50_defs.xml.h" | |||
| #include "nvc0/nvc0_compute.xml.h" | |||
| boolean | |||
| bool | |||
| nvc0_compute_validate_program(struct nvc0_context *nvc0); | |||
| #endif /* NVC0_COMPUTE_H */ | |||
| @@ -63,12 +63,12 @@ nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags) | |||
| if (!nvc0->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nvc0->base.vbo_dirty = TRUE; | |||
| nvc0->base.vbo_dirty = true; | |||
| } | |||
| if (nvc0->idxbuf.buffer && | |||
| nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nvc0->base.vbo_dirty = TRUE; | |||
| nvc0->base.vbo_dirty = true; | |||
| for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) { | |||
| uint32_t valid = nvc0->constbuf_valid[s]; | |||
| @@ -86,7 +86,7 @@ nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags) | |||
| continue; | |||
| if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) | |||
| nvc0->cb_dirty = TRUE; | |||
| nvc0->cb_dirty = true; | |||
| } | |||
| } | |||
| } | |||
| @@ -164,9 +164,9 @@ nvc0_default_kick_notify(struct nouveau_pushbuf *push) | |||
| if (screen) { | |||
| nouveau_fence_next(&screen->base); | |||
| nouveau_fence_update(&screen->base, TRUE); | |||
| nouveau_fence_update(&screen->base, true); | |||
| if (screen->cur_ctx) | |||
| screen->cur_ctx->state.flushed = TRUE; | |||
| screen->cur_ctx->state.flushed = true; | |||
| NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1); | |||
| } | |||
| } | |||
| @@ -378,7 +378,7 @@ out_err: | |||
| void | |||
| nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx, | |||
| boolean on_flush) | |||
| bool on_flush) | |||
| { | |||
| struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending; | |||
| struct nouveau_list *it; | |||
| @@ -93,7 +93,7 @@ | |||
| struct nvc0_blitctx; | |||
| boolean nvc0_blitctx_create(struct nvc0_context *); | |||
| bool nvc0_blitctx_create(struct nvc0_context *); | |||
| void nvc0_blitctx_destroy(struct nvc0_context *); | |||
| struct nvc0_context { | |||
| @@ -130,7 +130,7 @@ struct nvc0_context { | |||
| struct nvc0_constbuf constbuf[6][NVC0_MAX_PIPE_CONSTBUFS]; | |||
| uint16_t constbuf_dirty[6]; | |||
| uint16_t constbuf_valid[6]; | |||
| boolean cb_dirty; | |||
| bool cb_dirty; | |||
| struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS]; | |||
| unsigned num_vtxbufs; | |||
| @@ -164,14 +164,14 @@ struct nvc0_context { | |||
| unsigned sample_mask; | |||
| unsigned min_samples; | |||
| boolean vbo_push_hint; | |||
| bool vbo_push_hint; | |||
| uint8_t tfbbuf_dirty; | |||
| struct pipe_stream_output_target *tfbbuf[4]; | |||
| unsigned num_tfbbufs; | |||
| struct pipe_query *cond_query; | |||
| boolean cond_cond; /* inverted rendering condition */ | |||
| bool cond_cond; /* inverted rendering condition */ | |||
| uint cond_mode; | |||
| uint32_t cond_condmode; /* the calculated condition */ | |||
| @@ -210,15 +210,15 @@ nvc0_shader_stage(unsigned pipe) | |||
| /* nvc0_context.c */ | |||
| struct pipe_context *nvc0_create(struct pipe_screen *, void *); | |||
| void nvc0_bufctx_fence(struct nvc0_context *, struct nouveau_bufctx *, | |||
| boolean on_flush); | |||
| bool on_flush); | |||
| void nvc0_default_kick_notify(struct nouveau_pushbuf *); | |||
| /* nvc0_draw.c */ | |||
| extern struct draw_stage *nvc0_draw_render_stage(struct nvc0_context *); | |||
| /* nvc0_program.c */ | |||
| boolean nvc0_program_translate(struct nvc0_program *, uint16_t chipset); | |||
| boolean nvc0_program_upload_code(struct nvc0_context *, struct nvc0_program *); | |||
| bool nvc0_program_translate(struct nvc0_program *, uint16_t chipset); | |||
| bool nvc0_program_upload_code(struct nvc0_context *, struct nvc0_program *); | |||
| void nvc0_program_destroy(struct nvc0_context *, struct nvc0_program *); | |||
| void nvc0_program_library_upload(struct nvc0_context *); | |||
| uint32_t nvc0_program_symbol_offset(const struct nvc0_program *, | |||
| @@ -231,7 +231,7 @@ void nvc0_query_pushbuf_submit(struct nouveau_pushbuf *, | |||
| void nvc0_query_fifo_wait(struct nouveau_pushbuf *, struct pipe_query *); | |||
| void nvc0_so_target_save_offset(struct pipe_context *, | |||
| struct pipe_stream_output_target *, unsigned i, | |||
| boolean *serialize); | |||
| bool *serialize); | |||
| #define NVC0_QUERY_TFB_BUFFER_OFFSET (PIPE_QUERY_TYPES + 0) | |||
| @@ -250,8 +250,8 @@ extern void nvc0_init_state_functions(struct nvc0_context *); | |||
| /* nvc0_state_validate.c */ | |||
| void nvc0_validate_global_residents(struct nvc0_context *, | |||
| struct nouveau_bufctx *, int bin); | |||
| extern boolean nvc0_state_validate(struct nvc0_context *, uint32_t state_mask, | |||
| unsigned space_words); | |||
| extern bool nvc0_state_validate(struct nvc0_context *, uint32_t state_mask, | |||
| unsigned space_words); | |||
| /* nvc0_surface.c */ | |||
| extern void nvc0_clear(struct pipe_context *, unsigned buffers, | |||
| @@ -260,7 +260,7 @@ extern void nvc0_clear(struct pipe_context *, unsigned buffers, | |||
| extern void nvc0_init_surface_functions(struct nvc0_context *); | |||
| /* nvc0_tex.c */ | |||
| boolean nve4_validate_tsc(struct nvc0_context *nvc0, int s); | |||
| bool nve4_validate_tsc(struct nvc0_context *nvc0, int s); | |||
| void nvc0_validate_textures(struct nvc0_context *); | |||
| void nvc0_validate_samplers(struct nvc0_context *); | |||
| void nve4_set_tex_handles(struct nvc0_context *); | |||
| @@ -29,13 +29,13 @@ | |||
| #include "nvc0/nvc0_resource.h" | |||
| static uint32_t | |||
| nvc0_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, boolean is_3d) | |||
| nvc0_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, bool is_3d) | |||
| { | |||
| return nv50_tex_choose_tile_dims_helper(nx, ny, nz, is_3d); | |||
| } | |||
| static uint32_t | |||
| nvc0_mt_choose_storage_type(struct nv50_miptree *mt, boolean compressed) | |||
| nvc0_mt_choose_storage_type(struct nv50_miptree *mt, bool compressed) | |||
| { | |||
| const unsigned ms = util_logbase2(mt->base.base.nr_samples); | |||
| @@ -133,7 +133,7 @@ nvc0_mt_choose_storage_type(struct nv50_miptree *mt, boolean compressed) | |||
| return tile_flags; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nvc0_miptree_init_ms_mode(struct nv50_miptree *mt) | |||
| { | |||
| switch (mt->base.base.nr_samples) { | |||
| @@ -157,9 +157,9 @@ nvc0_miptree_init_ms_mode(struct nv50_miptree *mt) | |||
| break; | |||
| default: | |||
| NOUVEAU_ERR("invalid nr_samples: %u\n", mt->base.base.nr_samples); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -250,7 +250,7 @@ nvc0_miptree_create(struct pipe_screen *pscreen, | |||
| struct nouveau_device *dev = nouveau_screen(pscreen)->device; | |||
| struct nv50_miptree *mt = CALLOC_STRUCT(nv50_miptree); | |||
| struct pipe_resource *pt = &mt->base.base; | |||
| boolean compressed = dev->drm_version >= 0x01000101; | |||
| bool compressed = dev->drm_version >= 0x01000101; | |||
| int ret; | |||
| union nouveau_bo_config bo_config; | |||
| uint32_t bo_flags; | |||
| @@ -523,7 +523,7 @@ nvc0_program_dump(struct nvc0_program *prog) | |||
| } | |||
| #endif | |||
| boolean | |||
| bool | |||
| nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset) | |||
| { | |||
| struct nv50_ir_prog_info *info; | |||
| @@ -531,7 +531,7 @@ nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset) | |||
| info = CALLOC_STRUCT(nv50_ir_prog_info); | |||
| if (!info) | |||
| return FALSE; | |||
| return false; | |||
| info->type = prog->type; | |||
| info->target = chipset; | |||
| @@ -630,7 +630,7 @@ nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset) | |||
| assert(info->bin.tlsSpace < (1 << 24)); | |||
| prog->hdr[0] |= 1 << 26; | |||
| prog->hdr[1] |= align(info->bin.tlsSpace, 0x10); /* l[] size */ | |||
| prog->need_tls = TRUE; | |||
| prog->need_tls = true; | |||
| } | |||
| /* TODO: factor 2 only needed where joinat/precont is used, | |||
| * and we only have to count non-uniform branches | |||
| @@ -638,7 +638,7 @@ nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset) | |||
| /* | |||
| if ((info->maxCFDepth * 2) > 16) { | |||
| prog->hdr[2] |= (((info->maxCFDepth * 2) + 47) / 48) * 0x200; | |||
| prog->need_tls = TRUE; | |||
| prog->need_tls = true; | |||
| } | |||
| */ | |||
| if (info->io.globalAccess) | |||
| @@ -655,11 +655,11 @@ out: | |||
| return !ret; | |||
| } | |||
| boolean | |||
| bool | |||
| nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog) | |||
| { | |||
| struct nvc0_screen *screen = nvc0->screen; | |||
| const boolean is_cp = prog->type == PIPE_SHADER_COMPUTE; | |||
| const bool is_cp = prog->type == PIPE_SHADER_COMPUTE; | |||
| int ret; | |||
| uint32_t size = prog->code_size + (is_cp ? 0 : NVC0_SHADER_HEADER_SIZE); | |||
| uint32_t lib_pos = screen->lib_code->start; | |||
| @@ -694,7 +694,7 @@ nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog) | |||
| ret = nouveau_heap_alloc(heap, size, prog, &prog->mem); | |||
| if (ret) { | |||
| NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0); | |||
| } | |||
| @@ -729,7 +729,7 @@ nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog) | |||
| nv50_ir_relocate_code(prog->relocs, prog->code, code_pos, lib_pos, 0); | |||
| #ifdef DEBUG | |||
| if (debug_get_bool_option("NV50_PROG_DEBUG", FALSE)) | |||
| if (debug_get_bool_option("NV50_PROG_DEBUG", false)) | |||
| nvc0_program_dump(prog); | |||
| #endif | |||
| @@ -746,7 +746,7 @@ nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog) | |||
| BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(MEM_BARRIER), 1); | |||
| PUSH_DATA (nvc0->base.pushbuf, 0x1011); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| /* Upload code for builtin functions like integer division emulation. */ | |||
| @@ -21,8 +21,8 @@ struct nvc0_program { | |||
| struct pipe_shader_state pipe; | |||
| ubyte type; | |||
| boolean translated; | |||
| boolean need_tls; | |||
| bool translated; | |||
| bool need_tls; | |||
| uint8_t num_gprs; | |||
| uint32_t *code; | |||
| @@ -41,7 +41,7 @@ struct nvc0_program { | |||
| uint8_t clip_enable; /* mask of defined clip planes */ | |||
| uint8_t num_ucps; /* also set to max if ClipDistance is used */ | |||
| uint8_t edgeflag; /* attribute index of edgeflag input */ | |||
| boolean need_vertex_id; | |||
| bool need_vertex_id; | |||
| } vp; | |||
| struct { | |||
| uint8_t early_z; | |||
| @@ -44,7 +44,7 @@ struct nvc0_query { | |||
| uint32_t base; | |||
| uint32_t offset; /* base + i * rotate */ | |||
| uint8_t state; | |||
| boolean is64bit; | |||
| bool is64bit; | |||
| uint8_t rotate; | |||
| int nesting; /* only used for occlusion queries */ | |||
| union { | |||
| @@ -68,7 +68,7 @@ nvc0_query(struct pipe_query *pipe) | |||
| return (struct nvc0_query *)pipe; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvc0_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q, int size) | |||
| { | |||
| struct nvc0_screen *screen = nvc0->screen; | |||
| @@ -87,17 +87,17 @@ nvc0_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q, int size) | |||
| if (size) { | |||
| q->u.mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base); | |||
| if (!q->bo) | |||
| return FALSE; | |||
| return false; | |||
| q->offset = q->base; | |||
| ret = nouveau_bo_map(q->bo, 0, screen->base.client); | |||
| if (ret) { | |||
| nvc0_query_allocate(nvc0, q, 0); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base); | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static void | |||
| @@ -126,17 +126,17 @@ nvc0_query_create(struct pipe_context *pipe, unsigned type, unsigned index) | |||
| space = NVC0_QUERY_ALLOC_SPACE; | |||
| break; | |||
| case PIPE_QUERY_PIPELINE_STATISTICS: | |||
| q->is64bit = TRUE; | |||
| q->is64bit = true; | |||
| space = 512; | |||
| break; | |||
| case PIPE_QUERY_SO_STATISTICS: | |||
| case PIPE_QUERY_SO_OVERFLOW_PREDICATE: | |||
| q->is64bit = TRUE; | |||
| q->is64bit = true; | |||
| space = 64; | |||
| break; | |||
| case PIPE_QUERY_PRIMITIVES_GENERATED: | |||
| case PIPE_QUERY_PRIMITIVES_EMITTED: | |||
| q->is64bit = TRUE; | |||
| q->is64bit = true; | |||
| q->index = index; | |||
| space = 32; | |||
| break; | |||
| @@ -257,11 +257,11 @@ nvc0_query_begin(struct pipe_context *pipe, struct pipe_query *pq) | |||
| struct nvc0_context *nvc0 = nvc0_context(pipe); | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| struct nvc0_query *q = nvc0_query(pq); | |||
| boolean ret = true; | |||
| bool ret = true; | |||
| /* For occlusion queries we have to change the storage, because a previous | |||
| * query might set the initial render conition to FALSE even *after* we re- | |||
| * initialized it to TRUE. | |||
| * query might set the initial render conition to false even *after* we re- | |||
| * initialized it to true. | |||
| */ | |||
| if (q->rotate) { | |||
| nvc0_query_rotate(nvc0, q); | |||
| @@ -270,7 +270,7 @@ nvc0_query_begin(struct pipe_context *pipe, struct pipe_query *pq) | |||
| * query ? | |||
| */ | |||
| q->data[0] = q->sequence; /* initialize sequence */ | |||
| q->data[1] = 1; /* initial render condition = TRUE */ | |||
| q->data[1] = 1; /* initial render condition = true */ | |||
| q->data[4] = q->sequence + 1; /* for comparison COND_MODE */ | |||
| q->data[5] = 0; | |||
| } | |||
| @@ -401,7 +401,7 @@ nvc0_query_end(struct pipe_context *pipe, struct pipe_query *pq) | |||
| nvc0_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5)); | |||
| break; | |||
| case PIPE_QUERY_TIMESTAMP_DISJOINT: | |||
| /* This query is not issued on GPU because disjoint is forced to FALSE */ | |||
| /* This query is not issued on GPU because disjoint is forced to false */ | |||
| q->state = NVC0_QUERY_STATE_READY; | |||
| break; | |||
| default: | |||
| @@ -442,7 +442,7 @@ nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| struct nvc0_query *q = nvc0_query(pq); | |||
| uint64_t *res64 = (uint64_t*)result; | |||
| uint32_t *res32 = (uint32_t*)result; | |||
| boolean *res8 = (boolean*)result; | |||
| uint8_t *res8 = (uint8_t*)result; | |||
| uint64_t *data64 = (uint64_t *)q->data; | |||
| unsigned i; | |||
| @@ -450,7 +450,7 @@ nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| if (q->type >= NVC0_QUERY_DRV_STAT(0) && | |||
| q->type <= NVC0_QUERY_DRV_STAT_LAST) { | |||
| res64[0] = q->u.value; | |||
| return TRUE; | |||
| return true; | |||
| } else | |||
| #endif | |||
| if ((q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST) || | |||
| @@ -468,17 +468,17 @@ nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| /* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */ | |||
| PUSH_KICK(nvc0->base.pushbuf); | |||
| } | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->screen->base.client)) | |||
| return FALSE; | |||
| return false; | |||
| NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1); | |||
| } | |||
| q->state = NVC0_QUERY_STATE_READY; | |||
| switch (q->type) { | |||
| case PIPE_QUERY_GPU_FINISHED: | |||
| res8[0] = TRUE; | |||
| res8[0] = true; | |||
| break; | |||
| case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */ | |||
| res64[0] = q->data[1] - q->data[5]; | |||
| @@ -502,7 +502,7 @@ nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| break; | |||
| case PIPE_QUERY_TIMESTAMP_DISJOINT: | |||
| res64[0] = 1000000000; | |||
| res8[8] = FALSE; | |||
| res8[8] = false; | |||
| break; | |||
| case PIPE_QUERY_TIME_ELAPSED: | |||
| res64[0] = data64[1] - data64[3]; | |||
| @@ -516,10 +516,10 @@ nvc0_query_result(struct pipe_context *pipe, struct pipe_query *pq, | |||
| break; | |||
| default: | |||
| assert(0); /* can't happen, we don't create queries with invalid type */ | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -549,7 +549,7 @@ nvc0_render_condition(struct pipe_context *pipe, | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| struct nvc0_query *q; | |||
| uint32_t cond; | |||
| boolean wait = | |||
| bool wait = | |||
| mode != PIPE_RENDER_COND_NO_WAIT && | |||
| mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT; | |||
| @@ -563,7 +563,7 @@ nvc0_render_condition(struct pipe_context *pipe, | |||
| case PIPE_QUERY_SO_OVERFLOW_PREDICATE: | |||
| cond = condition ? NVC0_3D_COND_MODE_EQUAL : | |||
| NVC0_3D_COND_MODE_NOT_EQUAL; | |||
| wait = TRUE; | |||
| wait = true; | |||
| break; | |||
| case PIPE_QUERY_OCCLUSION_COUNTER: | |||
| case PIPE_QUERY_OCCLUSION_PREDICATE: | |||
| @@ -626,12 +626,12 @@ nvc0_query_pushbuf_submit(struct nouveau_pushbuf *push, | |||
| void | |||
| nvc0_so_target_save_offset(struct pipe_context *pipe, | |||
| struct pipe_stream_output_target *ptarg, | |||
| unsigned index, boolean *serialize) | |||
| unsigned index, bool *serialize) | |||
| { | |||
| struct nvc0_so_target *targ = nvc0_so_target(ptarg); | |||
| if (*serialize) { | |||
| *serialize = FALSE; | |||
| *serialize = false; | |||
| PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1); | |||
| IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0); | |||
| @@ -1080,7 +1080,7 @@ nvc0_mp_pm_query_begin(struct nvc0_context *nvc0, struct nvc0_query *q) | |||
| { | |||
| struct nvc0_screen *screen = nvc0->screen; | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| const boolean is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS; | |||
| const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS; | |||
| const struct nvc0_mp_pm_query_cfg *cfg; | |||
| unsigned i, c; | |||
| unsigned num_ab[2] = { 0, 0 }; | |||
| @@ -1101,7 +1101,7 @@ nvc0_mp_pm_query_begin(struct nvc0_context *nvc0, struct nvc0_query *q) | |||
| PUSH_SPACE(push, 4 * 8 * (is_nve4 ? 1 : 6) + 6); | |||
| if (!screen->pm.mp_counters_enabled) { | |||
| screen->pm.mp_counters_enabled = TRUE; | |||
| screen->pm.mp_counters_enabled = true; | |||
| BEGIN_NVC0(push, SUBC_SW(0x06ac), 1); | |||
| PUSH_DATA (push, 0x1fcb); | |||
| } | |||
| @@ -1168,7 +1168,7 @@ nvc0_mp_pm_query_end(struct nvc0_context *nvc0, struct nvc0_query *q) | |||
| struct nvc0_screen *screen = nvc0->screen; | |||
| struct pipe_context *pipe = &nvc0->base.pipe; | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| const boolean is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS; | |||
| const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS; | |||
| uint32_t mask; | |||
| uint32_t input[3]; | |||
| const uint block[3] = { 32, is_nve4 ? 4 : 1, 1 }; | |||
| @@ -1181,7 +1181,7 @@ nvc0_mp_pm_query_end(struct nvc0_context *nvc0, struct nvc0_query *q) | |||
| if (unlikely(!screen->pm.prog)) { | |||
| struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program); | |||
| prog->type = PIPE_SHADER_COMPUTE; | |||
| prog->translated = TRUE; | |||
| prog->translated = true; | |||
| prog->num_gprs = 14; | |||
| prog->parm_size = 12; | |||
| if (is_nve4) { | |||
| @@ -1249,9 +1249,9 @@ nvc0_mp_pm_query_end(struct nvc0_context *nvc0, struct nvc0_query *q) | |||
| } | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nvc0_mp_pm_query_read_data(uint32_t count[32][4], | |||
| struct nvc0_context *nvc0, boolean wait, | |||
| struct nvc0_context *nvc0, bool wait, | |||
| struct nvc0_query *q, | |||
| const struct nvc0_mp_pm_query_cfg *cfg, | |||
| unsigned mp_count) | |||
| @@ -1264,19 +1264,19 @@ nvc0_mp_pm_query_read_data(uint32_t count[32][4], | |||
| for (c = 0; c < cfg->num_counters; ++c) { | |||
| if (q->data[b + 8] != q->sequence) { | |||
| if (!wait) | |||
| return FALSE; | |||
| return false; | |||
| if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->base.client)) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| count[p][c] = q->data[b + q->ctr[c]]; | |||
| } | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nve4_mp_pm_query_read_data(uint32_t count[32][4], | |||
| struct nvc0_context *nvc0, boolean wait, | |||
| struct nvc0_context *nvc0, bool wait, | |||
| struct nvc0_query *q, | |||
| const struct nvc0_mp_pm_query_cfg *cfg, | |||
| unsigned mp_count) | |||
| @@ -1291,9 +1291,9 @@ nve4_mp_pm_query_read_data(uint32_t count[32][4], | |||
| for (d = 0; d < ((q->ctr[c] & ~3) ? 1 : 4); ++d) { | |||
| if (q->data[b + 20 + d] != q->sequence) { | |||
| if (!wait) | |||
| return FALSE; | |||
| return false; | |||
| if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->base.client)) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (q->ctr[c] & ~0x3) | |||
| count[p][c] = q->data[b + 16 + (q->ctr[c] & 3)]; | |||
| @@ -1302,7 +1302,7 @@ nve4_mp_pm_query_read_data(uint32_t count[32][4], | |||
| } | |||
| } | |||
| } | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| /* Metric calculations: | |||
| @@ -1325,7 +1325,7 @@ nvc0_mp_pm_query_result(struct nvc0_context *nvc0, struct nvc0_query *q, | |||
| unsigned mp_count = MIN2(nvc0->screen->mp_count_compute, 32); | |||
| unsigned p, c; | |||
| const struct nvc0_mp_pm_query_cfg *cfg; | |||
| boolean ret; | |||
| bool ret; | |||
| cfg = nvc0_mp_pm_query_get_cfg(nvc0, q); | |||
| @@ -1334,7 +1334,7 @@ nvc0_mp_pm_query_result(struct nvc0_context *nvc0, struct nvc0_query *q, | |||
| else | |||
| ret = nvc0_mp_pm_query_read_data(count, nvc0, wait, q, cfg, mp_count); | |||
| if (!ret) | |||
| return FALSE; | |||
| return false; | |||
| if (cfg->op == NVC0_COUNTER_OPn_SUM) { | |||
| for (c = 0; c < cfg->num_counters; ++c) | |||
| @@ -1394,7 +1394,7 @@ nvc0_mp_pm_query_result(struct nvc0_context *nvc0, struct nvc0_query *q, | |||
| } | |||
| *(uint64_t *)result = value; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| int | |||
| @@ -44,16 +44,16 @@ nvc0_screen_is_format_supported(struct pipe_screen *pscreen, | |||
| unsigned bindings) | |||
| { | |||
| if (sample_count > 8) | |||
| return FALSE; | |||
| return false; | |||
| if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */ | |||
| return FALSE; | |||
| return false; | |||
| if (!util_format_is_supported(format, bindings)) | |||
| return FALSE; | |||
| return false; | |||
| if ((bindings & PIPE_BIND_SAMPLER_VIEW) && (target != PIPE_BUFFER)) | |||
| if (util_format_get_blocksizebits(format) == 3 * 32) | |||
| return FALSE; | |||
| return false; | |||
| /* transfers & shared are always supported */ | |||
| bindings &= ~(PIPE_BIND_TRANSFER_READ | | |||
| @@ -556,7 +556,7 @@ nvc0_screen_init_compute(struct nvc0_screen *screen) | |||
| /* Using COMPUTE has weird effects on 3D state, we need to | |||
| * investigate this further before enabling it by default. | |||
| */ | |||
| if (debug_get_bool_option("NVC0_COMPUTE", FALSE)) | |||
| if (debug_get_bool_option("NVC0_COMPUTE", false)) | |||
| return nvc0_screen_compute_setup(screen, screen->base.pushbuf); | |||
| return 0; | |||
| case 0xe0: | |||
| @@ -570,7 +570,7 @@ nvc0_screen_init_compute(struct nvc0_screen *screen) | |||
| } | |||
| } | |||
| boolean | |||
| bool | |||
| nvc0_screen_resize_tls_area(struct nvc0_screen *screen, | |||
| uint32_t lpos, uint32_t lneg, uint32_t cstack) | |||
| { | |||
| @@ -580,7 +580,7 @@ nvc0_screen_resize_tls_area(struct nvc0_screen *screen, | |||
| if (size >= (1 << 20)) { | |||
| NOUVEAU_ERR("requested TLS size too large: 0x%"PRIx64"\n", size); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| size *= (screen->base.device->chipset >= 0xe0) ? 64 : 48; /* max warps */ | |||
| @@ -593,11 +593,11 @@ nvc0_screen_resize_tls_area(struct nvc0_screen *screen, | |||
| NULL, &bo); | |||
| if (ret) { | |||
| NOUVEAU_ERR("failed to allocate TLS area, size: 0x%"PRIx64"\n", size); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| nouveau_bo_ref(NULL, &screen->tls); | |||
| screen->tls = bo; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| #define FAIL_SCREEN_INIT(str, err) \ | |||
| @@ -791,7 +791,7 @@ nvc0_screen_create(struct nouveau_device *dev) | |||
| BEGIN_NVC0(push, NVC0_3D(COND_MODE), 1); | |||
| PUSH_DATA (push, NVC0_3D_COND_MODE_ALWAYS); | |||
| if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", TRUE)) { | |||
| if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) { | |||
| /* kill shaders after about 1 second (at 100 MHz) */ | |||
| BEGIN_NVC0(push, NVC0_3D(WATCHDOG_TIMER), 1); | |||
| PUSH_DATA (push, 0x17); | |||
| @@ -1041,7 +1041,7 @@ nvc0_screen_create(struct nouveau_device *dev) | |||
| if (!nvc0_blitter_create(screen)) | |||
| goto fail; | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE); | |||
| nouveau_fence_new(&screen->base, &screen->base.fence.current, false); | |||
| return pscreen; | |||
| @@ -28,10 +28,10 @@ struct nvc0_context; | |||
| struct nvc0_blitter; | |||
| struct nvc0_graph_state { | |||
| boolean flushed; | |||
| boolean rasterizer_discard; | |||
| boolean early_z_forced; | |||
| boolean prim_restart; | |||
| bool flushed; | |||
| bool rasterizer_discard; | |||
| bool early_z_forced; | |||
| bool prim_restart; | |||
| uint32_t instance_elts; /* bitmask of per-instance elements */ | |||
| uint32_t instance_base; | |||
| uint32_t constant_vbos; | |||
| @@ -95,7 +95,7 @@ struct nvc0_screen { | |||
| struct nvc0_program *prog; /* compute state object to read MP counters */ | |||
| struct pipe_query *mp_counter[8]; /* counter to query allocation */ | |||
| uint8_t num_mp_pm_active[2]; | |||
| boolean mp_counters_enabled; | |||
| bool mp_counters_enabled; | |||
| } pm; | |||
| struct nouveau_object *eng3d; /* sqrt(1/2)|kepler> + sqrt(1/2)|fermi> */ | |||
| @@ -276,7 +276,7 @@ int nvc0_screen_get_driver_query_info(struct pipe_screen *, unsigned, | |||
| int nvc0_screen_get_driver_query_group_info(struct pipe_screen *, unsigned, | |||
| struct pipe_driver_query_group_info *); | |||
| boolean nvc0_blitter_create(struct nvc0_screen *); | |||
| bool nvc0_blitter_create(struct nvc0_screen *); | |||
| void nvc0_blitter_destroy(struct nvc0_screen *); | |||
| void nvc0_screen_make_buffers_resident(struct nvc0_screen *); | |||
| @@ -287,8 +287,8 @@ int nvc0_screen_tsc_alloc(struct nvc0_screen *, void *); | |||
| int nve4_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *); | |||
| int nvc0_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *); | |||
| boolean nvc0_screen_resize_tls_area(struct nvc0_screen *, uint32_t lpos, | |||
| uint32_t lneg, uint32_t cstack); | |||
| bool nvc0_screen_resize_tls_area(struct nvc0_screen *, uint32_t lpos, | |||
| uint32_t lneg, uint32_t cstack); | |||
| static INLINE void | |||
| nvc0_resource_fence(struct nv04_resource *res, uint32_t flags) | |||
| @@ -63,22 +63,22 @@ nvc0_program_update_context_state(struct nvc0_context *nvc0, | |||
| } | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nvc0_program_validate(struct nvc0_context *nvc0, struct nvc0_program *prog) | |||
| { | |||
| if (prog->mem) | |||
| return TRUE; | |||
| return true; | |||
| if (!prog->translated) { | |||
| prog->translated = nvc0_program_translate( | |||
| prog, nvc0->screen->base.device->chipset); | |||
| if (!prog->translated) | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| if (likely(prog->code_size)) | |||
| return nvc0_program_upload_code(nvc0, prog); | |||
| return TRUE; /* stream output info only */ | |||
| return true; /* stream output info only */ | |||
| } | |||
| void | |||
| @@ -192,7 +192,7 @@ nvc0_gmtyprog_validate(struct nvc0_context *nvc0) | |||
| /* we allow GPs with no code for specifying stream output state only */ | |||
| if (gp && gp->code_size) { | |||
| const boolean gp_selects_layer = !!(gp->hdr[13] & (1 << 9)); | |||
| const bool gp_selects_layer = !!(gp->hdr[13] & (1 << 9)); | |||
| BEGIN_NVC0(push, NVC0_3D(MACRO_GP_SELECT), 1); | |||
| PUSH_DATA (push, 0x41); | |||
| @@ -280,7 +280,7 @@ nvc0_tfb_validate(struct nvc0_context *nvc0) | |||
| nvc0_query_pushbuf_submit(push, targ->pq, 0x4); | |||
| } else { | |||
| PUSH_DATA(push, 0); /* TFB_BUFFER_OFFSET */ | |||
| targ->clean = FALSE; | |||
| targ->clean = false; | |||
| } | |||
| } | |||
| for (; b < 4; ++b) | |||
| @@ -92,8 +92,8 @@ nvc0_blend_state_create(struct pipe_context *pipe, | |||
| int r; /* reference */ | |||
| uint32_t ms; | |||
| uint8_t blend_en = 0; | |||
| boolean indep_masks = FALSE; | |||
| boolean indep_funcs = FALSE; | |||
| bool indep_masks = false; | |||
| bool indep_funcs = false; | |||
| so->pipe = *cso; | |||
| @@ -111,7 +111,7 @@ nvc0_blend_state_create(struct pipe_context *pipe, | |||
| cso->rt[i].alpha_func != cso->rt[r].alpha_func || | |||
| cso->rt[i].alpha_src_factor != cso->rt[r].alpha_src_factor || | |||
| cso->rt[i].alpha_dst_factor != cso->rt[r].alpha_dst_factor) { | |||
| indep_funcs = TRUE; | |||
| indep_funcs = true; | |||
| break; | |||
| } | |||
| } | |||
| @@ -120,7 +120,7 @@ nvc0_blend_state_create(struct pipe_context *pipe, | |||
| for (i = 1; i < 8; ++i) { | |||
| if (cso->rt[i].colormask != cso->rt[0].colormask) { | |||
| indep_masks = TRUE; | |||
| indep_masks = true; | |||
| break; | |||
| } | |||
| } | |||
| @@ -790,7 +790,7 @@ nvc0_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index, | |||
| pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res); | |||
| nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE; | |||
| nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false; | |||
| if (nvc0->constbuf[s][i].user) { | |||
| nvc0->constbuf[s][i].u.data = cb->user_buffer; | |||
| nvc0->constbuf[s][i].size = MIN2(cb->buffer_size, 0x10000); | |||
| @@ -1018,7 +1018,7 @@ nvc0_so_target_create(struct pipe_context *pipe, | |||
| FREE(targ); | |||
| return NULL; | |||
| } | |||
| targ->clean = TRUE; | |||
| targ->clean = true; | |||
| targ->pipe.buffer_size = size; | |||
| targ->pipe.buffer_offset = offset; | |||
| @@ -1051,13 +1051,13 @@ nvc0_set_transform_feedback_targets(struct pipe_context *pipe, | |||
| { | |||
| struct nvc0_context *nvc0 = nvc0_context(pipe); | |||
| unsigned i; | |||
| boolean serialize = TRUE; | |||
| bool serialize = true; | |||
| assert(num_targets <= 4); | |||
| for (i = 0; i < num_targets; ++i) { | |||
| const boolean changed = nvc0->tfbbuf[i] != targets[i]; | |||
| const boolean append = (offsets[i] == ((unsigned)-1)); | |||
| const bool changed = nvc0->tfbbuf[i] != targets[i]; | |||
| const bool append = (offsets[i] == ((unsigned)-1)); | |||
| if (!changed && append) | |||
| continue; | |||
| nvc0->tfbbuf_dirty |= 1 << i; | |||
| @@ -1066,7 +1066,7 @@ nvc0_set_transform_feedback_targets(struct pipe_context *pipe, | |||
| nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize); | |||
| if (targets[i] && !append) | |||
| nvc0_so_target(targets[i])->clean = TRUE; | |||
| nvc0_so_target(targets[i])->clean = true; | |||
| pipe_so_target_reference(&nvc0->tfbbuf[i], targets[i]); | |||
| } | |||
| @@ -74,7 +74,7 @@ nvc0_validate_fb(struct nvc0_context *nvc0) | |||
| struct pipe_framebuffer_state *fb = &nvc0->framebuffer; | |||
| unsigned i, ms; | |||
| unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1; | |||
| boolean serialize = FALSE; | |||
| bool serialize = false; | |||
| nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB); | |||
| @@ -136,7 +136,7 @@ nvc0_validate_fb(struct nvc0_context *nvc0) | |||
| } | |||
| if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING) | |||
| serialize = TRUE; | |||
| serialize = true; | |||
| res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; | |||
| res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING; | |||
| @@ -168,7 +168,7 @@ nvc0_validate_fb(struct nvc0_context *nvc0) | |||
| ms_mode = mt->ms_mode; | |||
| if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING) | |||
| serialize = TRUE; | |||
| serialize = true; | |||
| mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; | |||
| mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING; | |||
| @@ -518,12 +518,12 @@ static void | |||
| nvc0_validate_derived_1(struct nvc0_context *nvc0) | |||
| { | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| boolean rasterizer_discard; | |||
| bool rasterizer_discard; | |||
| if (nvc0->rast && nvc0->rast->pipe.rasterizer_discard) { | |||
| rasterizer_discard = TRUE; | |||
| rasterizer_discard = true; | |||
| } else { | |||
| boolean zs = nvc0->zsa && | |||
| bool zs = nvc0->zsa && | |||
| (nvc0->zsa->pipe.depth.enabled || nvc0->zsa->pipe.stencil[0].enabled); | |||
| rasterizer_discard = !zs && | |||
| (!nvc0->fragprog || !nvc0->fragprog->hdr[18]); | |||
| @@ -631,7 +631,7 @@ static struct state_validate { | |||
| }; | |||
| #define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0])) | |||
| boolean | |||
| bool | |||
| nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words) | |||
| { | |||
| uint32_t state_mask; | |||
| @@ -652,15 +652,15 @@ nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words) | |||
| } | |||
| nvc0->dirty &= ~state_mask; | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, FALSE); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, false); | |||
| } | |||
| nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_3d); | |||
| ret = nouveau_pushbuf_validate(nvc0->base.pushbuf); | |||
| if (unlikely(nvc0->state.flushed)) { | |||
| nvc0->state.flushed = FALSE; | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, TRUE); | |||
| nvc0->state.flushed = false; | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, true); | |||
| } | |||
| return !ret; | |||
| } | |||
| @@ -39,7 +39,7 @@ struct nvc0_constbuf { | |||
| } u; | |||
| uint32_t size; | |||
| uint32_t offset; | |||
| boolean user; /* should only be TRUE if u.data is valid and non-NULL */ | |||
| bool user; /* should only be true if u.data is valid and non-NULL */ | |||
| }; | |||
| struct nvc0_vertex_element { | |||
| @@ -55,8 +55,8 @@ struct nvc0_vertex_stateobj { | |||
| unsigned num_elements; | |||
| uint32_t instance_elts; | |||
| uint32_t instance_bufs; | |||
| boolean shared_slots; | |||
| boolean need_conversion; /* e.g. VFETCH cannot convert f64 to f32 */ | |||
| bool shared_slots; | |||
| bool need_conversion; /* e.g. VFETCH cannot convert f64 to f32 */ | |||
| unsigned size; /* size of vertex in bytes (when packed) */ | |||
| struct nvc0_vertex_element element[0]; | |||
| }; | |||
| @@ -65,7 +65,7 @@ struct nvc0_so_target { | |||
| struct pipe_stream_output_target pipe; | |||
| struct pipe_query *pq; | |||
| unsigned stride; | |||
| boolean clean; | |||
| bool clean; | |||
| }; | |||
| static INLINE struct nvc0_so_target * | |||
| @@ -48,7 +48,7 @@ | |||
| #include "nv50/nv50_blit.h" | |||
| static INLINE uint8_t | |||
| nvc0_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal) | |||
| nvc0_2d_format(enum pipe_format format, bool dst, bool dst_src_equal) | |||
| { | |||
| uint8_t id = nvc0_format_table[format].rt; | |||
| @@ -81,9 +81,9 @@ nvc0_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal) | |||
| } | |||
| static int | |||
| nvc0_2d_texture_set(struct nouveau_pushbuf *push, boolean dst, | |||
| nvc0_2d_texture_set(struct nouveau_pushbuf *push, bool dst, | |||
| struct nv50_miptree *mt, unsigned level, unsigned layer, | |||
| enum pipe_format pformat, boolean dst_src_pformat_equal) | |||
| enum pipe_format pformat, bool dst_src_pformat_equal) | |||
| { | |||
| struct nouveau_bo *bo = mt->base.bo; | |||
| uint32_t width, height, depth; | |||
| @@ -161,16 +161,16 @@ nvc0_2d_texture_do_copy(struct nouveau_pushbuf *push, | |||
| const enum pipe_format dfmt = dst->base.base.format; | |||
| const enum pipe_format sfmt = src->base.base.format; | |||
| int ret; | |||
| boolean eqfmt = dfmt == sfmt; | |||
| bool eqfmt = dfmt == sfmt; | |||
| if (!PUSH_SPACE(push, 2 * 16 + 32)) | |||
| return PIPE_ERROR; | |||
| ret = nvc0_2d_texture_set(push, TRUE, dst, dst_level, dz, dfmt, eqfmt); | |||
| ret = nvc0_2d_texture_set(push, true, dst, dst_level, dz, dfmt, eqfmt); | |||
| if (ret) | |||
| return ret; | |||
| ret = nvc0_2d_texture_set(push, FALSE, src, src_level, sz, sfmt, eqfmt); | |||
| ret = nvc0_2d_texture_set(push, false, src, src_level, sz, sfmt, eqfmt); | |||
| if (ret) | |||
| return ret; | |||
| @@ -203,7 +203,7 @@ nvc0_resource_copy_region(struct pipe_context *pipe, | |||
| { | |||
| struct nvc0_context *nvc0 = nvc0_context(pipe); | |||
| int ret; | |||
| boolean m2mf; | |||
| bool m2mf; | |||
| unsigned dst_layer = dstz, src_layer = src_box->z; | |||
| if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { | |||
| @@ -704,7 +704,7 @@ nvc0_blitter_make_vp(struct nvc0_blitter *blit) | |||
| }; | |||
| blit->vp.type = PIPE_SHADER_VERTEX; | |||
| blit->vp.translated = TRUE; | |||
| blit->vp.translated = true; | |||
| if (blit->screen->base.class_3d >= GM107_3D_CLASS) { | |||
| blit->vp.code = (uint32_t *)code_gm107; /* const_cast */ | |||
| blit->vp.code_size = sizeof(code_gm107); | |||
| @@ -1217,7 +1217,7 @@ nvc0_blit_eng2d(struct nvc0_context *nvc0, const struct pipe_blit_info *info) | |||
| int i; | |||
| uint32_t mode; | |||
| uint32_t mask = nv50_blit_eng2d_get_mask(info); | |||
| boolean b; | |||
| bool b; | |||
| mode = nv50_blit_get_filter(info) ? | |||
| NV50_2D_BLIT_CONTROL_FILTER_BILINEAR : | |||
| @@ -1377,39 +1377,39 @@ nvc0_blit(struct pipe_context *pipe, const struct pipe_blit_info *info) | |||
| { | |||
| struct nvc0_context *nvc0 = nvc0_context(pipe); | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| boolean eng3d = FALSE; | |||
| bool eng3d = false; | |||
| if (util_format_is_depth_or_stencil(info->dst.resource->format)) { | |||
| if (!(info->mask & PIPE_MASK_ZS)) | |||
| return; | |||
| if (info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT || | |||
| info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| if (info->filter != PIPE_TEX_FILTER_NEAREST) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else { | |||
| if (!(info->mask & PIPE_MASK_RGBA)) | |||
| return; | |||
| if (info->mask != PIPE_MASK_RGBA) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } | |||
| if (nv50_miptree(info->src.resource)->layout_3d) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else | |||
| if (info->src.box.depth != info->dst.box.depth) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| debug_printf("blit: cannot filter array or cube textures in z direction"); | |||
| } | |||
| if (!eng3d && info->dst.format != info->src.format) { | |||
| if (!nv50_2d_dst_format_faithful(info->dst.format)) { | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } else | |||
| if (!nv50_2d_src_format_faithful(info->src.format)) { | |||
| if (!util_format_is_luminance(info->src.format)) { | |||
| if (!nv50_2d_dst_format_ops_supported(info->dst.format)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| else | |||
| if (util_format_is_intensity(info->src.format)) | |||
| eng3d = info->src.format != PIPE_FORMAT_I8_UNORM; | |||
| @@ -1421,24 +1421,24 @@ nvc0_blit(struct pipe_context *pipe, const struct pipe_blit_info *info) | |||
| } | |||
| } else | |||
| if (util_format_is_luminance_alpha(info->src.format)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| } | |||
| if (info->src.resource->nr_samples == 8 && | |||
| info->dst.resource->nr_samples <= 1) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| #if 0 | |||
| /* FIXME: can't make this work with eng2d anymore, at least not on nv50 */ | |||
| if (info->src.resource->nr_samples > 1 || | |||
| info->dst.resource->nr_samples > 1) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| #endif | |||
| /* FIXME: find correct src coordinates adjustments */ | |||
| if ((info->src.box.width != info->dst.box.width && | |||
| info->src.box.width != -info->dst.box.width) || | |||
| (info->src.box.height != info->dst.box.height && | |||
| info->src.box.height != -info->dst.box.height)) | |||
| eng3d = TRUE; | |||
| eng3d = true; | |||
| if (nvc0->screen->num_occlusion_queries_active) | |||
| IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0); | |||
| @@ -1460,13 +1460,13 @@ nvc0_flush_resource(struct pipe_context *ctx, | |||
| { | |||
| } | |||
| boolean | |||
| bool | |||
| nvc0_blitter_create(struct nvc0_screen *screen) | |||
| { | |||
| screen->blitter = CALLOC_STRUCT(nvc0_blitter); | |||
| if (!screen->blitter) { | |||
| NOUVEAU_ERR("failed to allocate blitter struct\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| screen->blitter->screen = screen; | |||
| @@ -1475,7 +1475,7 @@ nvc0_blitter_create(struct nvc0_screen *screen) | |||
| nvc0_blitter_make_vp(screen->blitter); | |||
| nvc0_blitter_make_sampler(screen->blitter); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -1498,20 +1498,20 @@ nvc0_blitter_destroy(struct nvc0_screen *screen) | |||
| FREE(blitter); | |||
| } | |||
| boolean | |||
| bool | |||
| nvc0_blitctx_create(struct nvc0_context *nvc0) | |||
| { | |||
| nvc0->blit = CALLOC_STRUCT(nvc0_blitctx); | |||
| if (!nvc0->blit) { | |||
| NOUVEAU_ERR("failed to allocate blit context\n"); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| nvc0->blit->nvc0 = nvc0; | |||
| nvc0->blit->rast.pipe.half_pixel_center = 1; | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| void | |||
| @@ -35,7 +35,7 @@ | |||
| NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK) | |||
| static INLINE uint32_t | |||
| nv50_tic_swizzle(uint32_t tc, unsigned swz, boolean tex_int) | |||
| nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int) | |||
| { | |||
| switch (swz) { | |||
| case PIPE_SWIZZLE_RED: | |||
| @@ -82,7 +82,7 @@ nvc0_create_texture_view(struct pipe_context *pipe, | |||
| uint32_t depth; | |||
| struct nv50_tic_entry *view; | |||
| struct nv50_miptree *mt; | |||
| boolean tex_int; | |||
| bool tex_int; | |||
| view = MALLOC_STRUCT(nv50_tic_entry); | |||
| if (!view) | |||
| @@ -195,7 +195,7 @@ nvc0_create_texture_view(struct pipe_context *pipe, | |||
| default: | |||
| NOUVEAU_ERR("unexpected/invalid texture target: %d\n", | |||
| mt->base.base.target); | |||
| return FALSE; | |||
| return false; | |||
| } | |||
| tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000; | |||
| @@ -226,7 +226,7 @@ nvc0_create_texture_view(struct pipe_context *pipe, | |||
| return &view->pipe; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvc0_validate_tic(struct nvc0_context *nvc0, int s) | |||
| { | |||
| uint32_t commands[32]; | |||
| @@ -234,12 +234,12 @@ nvc0_validate_tic(struct nvc0_context *nvc0, int s) | |||
| struct nouveau_bo *txc = nvc0->screen->txc; | |||
| unsigned i; | |||
| unsigned n = 0; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| for (i = 0; i < nvc0->num_textures[s]; ++i) { | |||
| struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]); | |||
| struct nv04_resource *res; | |||
| const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| if (!tic) { | |||
| if (dirty) | |||
| @@ -263,7 +263,7 @@ nvc0_validate_tic(struct nvc0_context *nvc0, int s) | |||
| BEGIN_NIC0(push, NVC0_M2MF(DATA), 8); | |||
| PUSH_DATAp(push, &tic->tic[0], 8); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } else | |||
| if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { | |||
| BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1); | |||
| @@ -295,18 +295,18 @@ nvc0_validate_tic(struct nvc0_context *nvc0, int s) | |||
| return need_flush; | |||
| } | |||
| static boolean | |||
| static bool | |||
| nve4_validate_tic(struct nvc0_context *nvc0, unsigned s) | |||
| { | |||
| struct nouveau_bo *txc = nvc0->screen->txc; | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| unsigned i; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| for (i = 0; i < nvc0->num_textures[s]; ++i) { | |||
| struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]); | |||
| struct nv04_resource *res; | |||
| const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| if (!tic) { | |||
| nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID; | |||
| @@ -328,7 +328,7 @@ nve4_validate_tic(struct nvc0_context *nvc0, unsigned s) | |||
| PUSH_DATA (push, 0x1001); | |||
| PUSH_DATAp(push, &tic->tic[0], 8); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } else | |||
| if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { | |||
| BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1); | |||
| @@ -356,7 +356,7 @@ nve4_validate_tic(struct nvc0_context *nvc0, unsigned s) | |||
| void nvc0_validate_textures(struct nvc0_context *nvc0) | |||
| { | |||
| boolean need_flush; | |||
| bool need_flush; | |||
| if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) { | |||
| need_flush = nve4_validate_tic(nvc0, 0); | |||
| @@ -374,14 +374,14 @@ void nvc0_validate_textures(struct nvc0_context *nvc0) | |||
| } | |||
| } | |||
| static boolean | |||
| static bool | |||
| nvc0_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| { | |||
| uint32_t commands[16]; | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| unsigned i; | |||
| unsigned n = 0; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| for (i = 0; i < nvc0->num_samplers[s]; ++i) { | |||
| struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]); | |||
| @@ -398,7 +398,7 @@ nvc0_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc, | |||
| 65536 + tsc->id * 32, NV_VRAM_DOMAIN(&nvc0->screen->base), | |||
| 32, tsc->tsc); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } | |||
| nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32); | |||
| @@ -418,13 +418,13 @@ nvc0_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| return need_flush; | |||
| } | |||
| boolean | |||
| bool | |||
| nve4_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| { | |||
| struct nouveau_bo *txc = nvc0->screen->txc; | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| unsigned i; | |||
| boolean need_flush = FALSE; | |||
| bool need_flush = false; | |||
| for (i = 0; i < nvc0->num_samplers[s]; ++i) { | |||
| struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]); | |||
| @@ -447,7 +447,7 @@ nve4_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| PUSH_DATA (push, 0x1001); | |||
| PUSH_DATAp(push, &tsc->tsc[0], 8); | |||
| need_flush = TRUE; | |||
| need_flush = true; | |||
| } | |||
| nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32); | |||
| @@ -466,7 +466,7 @@ nve4_validate_tsc(struct nvc0_context *nvc0, int s) | |||
| void nvc0_validate_samplers(struct nvc0_context *nvc0) | |||
| { | |||
| boolean need_flush; | |||
| bool need_flush; | |||
| if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) { | |||
| need_flush = nve4_validate_tsc(nvc0, 0); | |||
| @@ -329,17 +329,17 @@ nve4_m2mf_copy_linear(struct nouveau_context *nv, | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt) | |||
| { | |||
| if (mt->base.domain == NOUVEAU_BO_VRAM) | |||
| return FALSE; | |||
| return false; | |||
| if (mt->base.base.usage != PIPE_USAGE_STAGING) | |||
| return FALSE; | |||
| return false; | |||
| return !nouveau_bo_memtype(mt->base.bo); | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage) | |||
| { | |||
| if (!mt->base.mm) { | |||
| @@ -61,8 +61,8 @@ nvc0_vertex_state_create(struct pipe_context *pipe, | |||
| so->num_elements = num_elements; | |||
| so->instance_elts = 0; | |||
| so->instance_bufs = 0; | |||
| so->shared_slots = FALSE; | |||
| so->need_conversion = FALSE; | |||
| so->shared_slots = false; | |||
| so->need_conversion = false; | |||
| memset(so->vb_access_size, 0, sizeof(so->vb_access_size)); | |||
| @@ -93,7 +93,7 @@ nvc0_vertex_state_create(struct pipe_context *pipe, | |||
| return NULL; | |||
| } | |||
| so->element[i].state = nvc0_format_table[fmt].vtx; | |||
| so->need_conversion = TRUE; | |||
| so->need_conversion = true; | |||
| } | |||
| size = util_format_get_blocksize(fmt); | |||
| @@ -141,7 +141,7 @@ nvc0_vertex_state_create(struct pipe_context *pipe, | |||
| if (so->instance_elts || src_offset_max >= (1 << 14)) | |||
| return so; | |||
| so->shared_slots = TRUE; | |||
| so->shared_slots = true; | |||
| for (i = 0; i < num_elements; ++i) { | |||
| const unsigned b = elements[i].vertex_buffer_index; | |||
| @@ -265,7 +265,7 @@ nvc0_update_user_vbufs(struct nvc0_context *nvc0) | |||
| PUSH_DATAh(push, address[b] + ve->src_offset); | |||
| PUSH_DATA (push, address[b] + ve->src_offset); | |||
| } | |||
| nvc0->base.vbo_dirty = TRUE; | |||
| nvc0->base.vbo_dirty = true; | |||
| } | |||
| static void | |||
| @@ -419,7 +419,7 @@ nvc0_vertex_arrays_validate(struct nvc0_context *nvc0) | |||
| uint32_t const_vbos; | |||
| unsigned i; | |||
| uint8_t vbo_mode; | |||
| boolean update_vertex; | |||
| bool update_vertex; | |||
| nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX); | |||
| @@ -559,7 +559,7 @@ nvc0_draw_vbo_kick_notify(struct nouveau_pushbuf *push) | |||
| { | |||
| struct nvc0_screen *screen = push->user_priv; | |||
| nouveau_fence_update(&screen->base, TRUE); | |||
| nouveau_fence_update(&screen->base, true); | |||
| NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1); | |||
| } | |||
| @@ -695,7 +695,7 @@ nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push, | |||
| } | |||
| static void | |||
| nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten, | |||
| nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten, | |||
| unsigned mode, unsigned start, unsigned count, | |||
| unsigned instance_count, int32_t index_bias) | |||
| { | |||
| @@ -836,7 +836,7 @@ nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info) | |||
| } | |||
| static INLINE void | |||
| nvc0_update_prim_restart(struct nvc0_context *nvc0, boolean en, uint32_t index) | |||
| nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index) | |||
| { | |||
| struct nouveau_pushbuf *push = nvc0->base.pushbuf; | |||
| @@ -910,13 +910,13 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| continue; | |||
| if (res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nvc0->cb_dirty = TRUE; | |||
| nvc0->cb_dirty = true; | |||
| } | |||
| } | |||
| if (nvc0->cb_dirty) { | |||
| IMMED_NVC0(push, NVC0_3D(MEM_BARRIER), 0x1011); | |||
| nvc0->cb_dirty = FALSE; | |||
| nvc0->cb_dirty = false; | |||
| } | |||
| if (nvc0->state.vbo_mode) { | |||
| @@ -940,19 +940,19 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| if (!nvc0->vtxbuf[i].buffer) | |||
| continue; | |||
| if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nvc0->base.vbo_dirty = TRUE; | |||
| nvc0->base.vbo_dirty = true; | |||
| } | |||
| if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer && | |||
| nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) | |||
| nvc0->base.vbo_dirty = TRUE; | |||
| nvc0->base.vbo_dirty = true; | |||
| nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index); | |||
| if (nvc0->base.vbo_dirty) { | |||
| if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS) | |||
| IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0); | |||
| nvc0->base.vbo_dirty = FALSE; | |||
| nvc0->base.vbo_dirty = false; | |||
| } | |||
| if (unlikely(info->indirect)) { | |||
| @@ -962,10 +962,10 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) | |||
| nvc0_draw_stream_output(nvc0, info); | |||
| } else | |||
| if (info->indexed) { | |||
| boolean shorten = info->max_index <= 65535; | |||
| bool shorten = info->max_index <= 65535; | |||
| if (info->primitive_restart && info->restart_index > 65535) | |||
| shorten = FALSE; | |||
| shorten = false; | |||
| nvc0_draw_elements(nvc0, shorten, | |||
| info->mode, info->start, info->count, | |||
| @@ -21,12 +21,12 @@ struct push_context { | |||
| uint32_t restart_index; | |||
| uint32_t instance_id; | |||
| boolean prim_restart; | |||
| boolean need_vertex_id; | |||
| bool prim_restart; | |||
| bool need_vertex_id; | |||
| struct { | |||
| boolean enabled; | |||
| boolean value; | |||
| bool enabled; | |||
| bool value; | |||
| unsigned stride; | |||
| const uint8_t *data; | |||
| } edgeflag; | |||
| @@ -47,7 +47,7 @@ nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx) | |||
| ctx->need_vertex_id = | |||
| nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32); | |||
| ctx->edgeflag.value = TRUE; | |||
| ctx->edgeflag.value = true; | |||
| ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS; | |||
| /* silence warnings */ | |||
| @@ -136,14 +136,14 @@ prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index) | |||
| return i; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| ef_value(const struct push_context *ctx, uint32_t index) | |||
| { | |||
| float *pf = (float *)&ctx->edgeflag.data[index * ctx->edgeflag.stride]; | |||
| return *pf ? TRUE : FALSE; | |||
| return *pf ? true : false; | |||
| } | |||
| static INLINE boolean | |||
| static INLINE bool | |||
| ef_toggle(struct push_context *ctx) | |||
| { | |||
| ctx->edgeflag.value = !ctx->edgeflag.value; | |||
| @@ -483,7 +483,7 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info) | |||
| struct pipe_context *pipe = &nvc0->base.pipe; | |||
| struct nvc0_so_target *targ; | |||
| targ = nvc0_so_target(info->count_from_stream_output); | |||
| pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count); | |||
| pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count); | |||
| vert_count /= targ->stride; | |||
| } | |||
| ctx.idxbuf = NULL; /* shut up warnings */ | |||
| @@ -250,7 +250,7 @@ nve4_compute_validate_surfaces(struct nvc0_context *nvc0) | |||
| static void | |||
| nve4_compute_validate_samplers(struct nvc0_context *nvc0) | |||
| { | |||
| boolean need_flush = nve4_validate_tsc(nvc0, 5); | |||
| bool need_flush = nve4_validate_tsc(nvc0, 5); | |||
| if (need_flush) { | |||
| BEGIN_NVC0(nvc0->base.pushbuf, NVE4_COMPUTE(TSC_FLUSH), 1); | |||
| PUSH_DATA (nvc0->base.pushbuf, 0); | |||
| @@ -299,11 +299,11 @@ nve4_compute_set_tex_handles(struct nvc0_context *nvc0) | |||
| } | |||
| static boolean | |||
| static bool | |||
| nve4_compute_state_validate(struct nvc0_context *nvc0) | |||
| { | |||
| if (!nvc0_compute_validate_program(nvc0)) | |||
| return FALSE; | |||
| return false; | |||
| if (nvc0->dirty_cp & NVC0_NEW_CP_TEXTURES) | |||
| nve4_compute_validate_textures(nvc0); | |||
| if (nvc0->dirty_cp & NVC0_NEW_CP_SAMPLERS) | |||
| @@ -316,15 +316,15 @@ nve4_compute_state_validate(struct nvc0_context *nvc0) | |||
| nvc0_validate_global_residents(nvc0, | |||
| nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, FALSE); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false); | |||
| nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp); | |||
| if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf))) | |||
| return FALSE; | |||
| return false; | |||
| if (unlikely(nvc0->state.flushed)) | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, TRUE); | |||
| nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true); | |||
| return TRUE; | |||
| return true; | |||
| } | |||
| @@ -505,7 +505,7 @@ nve4_compute_validate_textures(struct nvc0_context *nvc0) | |||
| for (i = 0; i < nvc0->num_textures[s]; ++i) { | |||
| struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]); | |||
| struct nv04_resource *res; | |||
| const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i)); | |||
| if (!tic) { | |||
| nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID; | |||
| @@ -575,18 +575,18 @@ nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc) | |||
| { | |||
| const uint32_t *data = (const uint32_t *)desc; | |||
| unsigned i; | |||
| boolean zero = FALSE; | |||
| bool zero = false; | |||
| debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n"); | |||
| for (i = 0; i < sizeof(*desc); i += 4) { | |||
| if (data[i / 4]) { | |||
| debug_printf("[%x]: 0x%08x\n", i, data[i / 4]); | |||
| zero = FALSE; | |||
| zero = false; | |||
| } else | |||
| if (!zero) { | |||
| debug_printf("...\n"); | |||
| zero = TRUE; | |||
| zero = true; | |||
| } | |||
| } | |||
| @@ -606,7 +606,7 @@ nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc) | |||
| for (i = 0; i < 8; ++i) { | |||
| uint64_t address; | |||
| uint32_t size = desc->cb[i].size; | |||
| boolean valid = !!(desc->cb_mask & (1 << i)); | |||
| bool valid = !!(desc->cb_mask & (1 << i)); | |||
| address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l; | |||
| @@ -17,7 +17,7 @@ static struct util_hash_table *fd_tab = NULL; | |||
| pipe_static_mutex(nouveau_screen_mutex); | |||
| boolean nouveau_drm_screen_unref(struct nouveau_screen *screen) | |||
| bool nouveau_drm_screen_unref(struct nouveau_screen *screen) | |||
| { | |||
| int ret; | |||
| if (screen->refcount == -1) | |||