@@ -47,20 +47,6 @@ vl_context_destroy(struct pipe_video_context *context)
    FREE(ctx);
 }
-static int
-vl_context_get_param(struct pipe_video_context *context, int param)
-{
-   struct vl_context *ctx = (struct vl_context*)context;
-   assert(context);
-   if (param == PIPE_CAP_NPOT_TEXTURES)
-      return !ctx->pot_buffers;
-   debug_printf("vl_context: Unknown PIPE_CAP %d\n", param);
-   return 0;
-}
 static boolean
 vl_context_is_format_supported(struct pipe_video_context *context,
                                enum pipe_format format,
@@ -192,12 +178,15 @@ vl_context_create_decoder(struct pipe_video_context *context,
 {
    struct vl_context *ctx = (struct vl_context*)context;
    unsigned buffer_width, buffer_height;
+   bool pot_buffers;
    assert(context);
    assert(width > 0 && height > 0);
+   pot_buffers = !ctx->base.screen->get_video_param(ctx->base.screen, profile, PIPE_VIDEO_CAP_NPOT_TEXTURES);
-   buffer_width = ctx->pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
-   buffer_height = ctx->pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+   buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
+   buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
    switch (u_reduce_video_profile(profile)) {
       case PIPE_VIDEO_CODEC_MPEG12:
@@ -219,16 +208,24 @@ vl_context_create_buffer(struct pipe_video_context *context,
    const enum pipe_format *resource_formats;
    struct pipe_video_buffer *result;
    unsigned buffer_width, buffer_height;
+   bool pot_buffers;
    assert(context);
    assert(width > 0 && height > 0);
+   pot_buffers = !ctx->base.screen->get_video_param
+   (
+      ctx->base.screen,
+      PIPE_VIDEO_PROFILE_UNKNOWN,
+      PIPE_VIDEO_CAP_NPOT_TEXTURES
+   );
    resource_formats = vl_video_buffer_formats(ctx->pipe, buffer_format);
    if (!resource_formats)
       return NULL;
-   buffer_width = ctx->pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
-   buffer_height = ctx->pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+   buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
+   buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
    result = vl_video_buffer_init(context, ctx->pipe,
                                  buffer_width, buffer_height, 1,
@@ -252,7 +249,7 @@ vl_context_create_compositor(struct pipe_video_context *context)
 }
 struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe, bool pot_buffers)
+vl_create_context(struct pipe_context *pipe)
 {
    struct vl_context *ctx;
@@ -264,7 +261,6 @@ vl_create_context(struct pipe_context *pipe, bool pot_buffers)
    ctx->base.screen = pipe->screen;
    ctx->base.destroy = vl_context_destroy;
-   ctx->base.get_param = vl_context_get_param;
    ctx->base.is_format_supported = vl_context_is_format_supported;
    ctx->base.create_surface = vl_context_create_surface;
    ctx->base.create_sampler_view = vl_context_create_sampler_view;
@@ -275,7 +271,6 @@ vl_create_context(struct pipe_context *pipe, bool pot_buffers)
    ctx->base.create_compositor = vl_context_create_compositor;
    ctx->pipe = pipe;
-   ctx->pot_buffers = pot_buffers;
    return &ctx->base;
 }
@@ -38,12 +38,11 @@ struct vl_context
 {
    struct pipe_video_context base;
    struct pipe_context *pipe;
-   bool pot_buffers;
 };
 /* drivers can call this function in their pipe_video_context constructors and pass it
    an accelerated pipe_context along with suitable buffering modes, etc */
 struct pipe_video_context *
-vl_create_context(struct pipe_context *pipe, bool pot_buffers);
+vl_create_context(struct pipe_context *pipe);
 #endif /* vl_context_h */
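For orientation (not part of the patch itself): after this change a driver no longer tells vl_create_context whether to use power-of-two buffers; it answers that question through the new pipe_screen::get_video_param hook, which vl queries on demand. Below is a minimal sketch of the driver side under that assumption, with illustrative names only — the real wiring for r300, r600 and softpipe follows in the hunks after this note.

static int
example_get_video_param(struct pipe_screen *screen,
                        enum pipe_video_profile profile,
                        enum pipe_video_cap param)
{
   switch (param) {
      case PIPE_VIDEO_CAP_NPOT_TEXTURES:
         return 1; /* illustrative: pretend this hardware handles NPOT video buffers */
      default:
         return 0;
   }
}

static struct pipe_video_context *
example_video_create(struct pipe_screen *screen, void *priv)
{
   struct pipe_context *pipe = screen->context_create(screen, priv);
   if (!pipe)
      return NULL;

   /* no pot_buffers flag any more; vl_create_context asks the screen itself */
   return vl_create_context(pipe);
}

(The screen constructor would also set screen->get_video_param = example_get_video_param, mirroring the r300_screen_create / r600_screen_create / softpipe_create_screen hunks below.)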
@@ -303,6 +303,18 @@ static float r300_get_paramf(struct pipe_screen* pscreen, enum pipe_cap param)
    }
 }
+static int r300_get_video_param(struct pipe_screen *screen,
+                                enum pipe_video_profile profile,
+                                enum pipe_video_cap param)
+{
+   switch (param) {
+      case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+         return 0;
+      default:
+         return 0;
+   }
+}
 static boolean r300_is_format_supported(struct pipe_screen* screen,
                                         enum pipe_format format,
                                         enum pipe_texture_target target,
@@ -508,6 +520,7 @@ struct pipe_screen* r300_screen_create(struct radeon_winsys *rws)
    r300screen->screen.get_param = r300_get_param;
    r300screen->screen.get_shader_param = r300_get_shader_param;
    r300screen->screen.get_paramf = r300_get_paramf;
+   r300screen->screen.get_video_param = r300_get_video_param;
    r300screen->screen.is_format_supported = r300_is_format_supported;
    r300screen->screen.context_create = r300_create_context;
    r300screen->screen.video_context_create = r300_video_create;
@@ -34,5 +34,5 @@ r300_video_create(struct pipe_screen *screen, void *priv)
    if (!pipe)
       return NULL;
-   return vl_create_context(pipe, false);
+   return vl_create_context(pipe);
 }
@@ -507,6 +507,18 @@ static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, e
    }
 }
+static int r600_get_video_param(struct pipe_screen *screen,
+                                enum pipe_video_profile profile,
+                                enum pipe_video_cap param)
+{
+   switch (param) {
+      case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+         return 1;
+      default:
+         return 0;
+   }
+}
 static boolean r600_is_format_supported(struct pipe_screen* screen,
                                         enum pipe_format format,
                                         enum pipe_texture_target target,
@@ -653,6 +665,7 @@ struct pipe_screen *r600_screen_create(struct radeon *radeon)
    rscreen->screen.get_param = r600_get_param;
    rscreen->screen.get_shader_param = r600_get_shader_param;
    rscreen->screen.get_paramf = r600_get_paramf;
+   rscreen->screen.get_video_param = r600_get_video_param;
    rscreen->screen.is_format_supported = r600_is_format_supported;
    rscreen->screen.context_create = r600_create_context;
    rscreen->screen.video_context_create = r600_video_create;
@@ -40,5 +40,5 @@ r600_video_create(struct pipe_screen *screen, void *priv)
    if (!pipe)
       return NULL;
-   return vl_create_context(pipe, false);
+   return vl_create_context(pipe);
 }
@@ -171,6 +171,18 @@ softpipe_get_paramf(struct pipe_screen *screen, enum pipe_cap param)
    }
 }
+static int
+softpipe_get_video_param(struct pipe_screen *screen,
+                         enum pipe_video_profile profile,
+                         enum pipe_video_cap param)
+{
+   switch (param) {
+      case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+         return 0;
+      default:
+         return 0;
+   }
+}
 /**
  * Query format support for creating a texture, drawing surface, etc.
@@ -299,7 +311,7 @@ sp_video_create(struct pipe_screen *screen, void *priv)
       return NULL;
    /* TODO: Use slice buffering for softpipe when implemented, no advantage to buffering an entire picture with softpipe */
-   return vl_create_context(pipe, true);
+   return vl_create_context(pipe);
 }
 /**
@@ -324,6 +336,7 @@ softpipe_create_screen(struct sw_winsys *winsys)
    screen->base.get_param = softpipe_get_param;
    screen->base.get_shader_param = softpipe_get_shader_param;
    screen->base.get_paramf = softpipe_get_paramf;
+   screen->base.get_video_param = softpipe_get_video_param;
    screen->base.is_format_supported = softpipe_is_format_supported;
    screen->base.context_create = softpipe_create_context;
    screen->base.flush_frontbuffer = softpipe_flush_frontbuffer;
@@ -493,6 +493,11 @@ enum pipe_shader_cap
    PIPE_SHADER_CAP_SUBROUTINES = 16, /* BGNSUB, ENDSUB, CAL, RET */
 };
+/* Video caps, can be different for each codec/profile */
+enum pipe_video_cap
+{
+   PIPE_VIDEO_CAP_NPOT_TEXTURES = 0,
+};
 enum pipe_video_codec
 {
@@ -92,6 +92,12 @@ struct pipe_screen {
     */
    int (*get_shader_param)( struct pipe_screen *, unsigned shader, enum pipe_shader_cap param );
+   /**
+    * Query an integer-valued capability/parameter/limit for a codec/profile
+    * \param param one of PIPE_VIDEO_CAP_x
+    */
+   int (*get_video_param)( struct pipe_screen *, enum pipe_video_profile profile, enum pipe_video_cap param );
    struct pipe_context * (*context_create)( struct pipe_screen *, void *priv );
    struct pipe_video_context * (*video_context_create)( struct pipe_screen *screen, void *priv );
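With the query moved onto pipe_screen, a state tracker can check video caps before any pipe_video_context exists. A hedged sketch of such a caller (the helper name is made up for illustration; profile-independent queries pass PIPE_VIDEO_PROFILE_UNKNOWN, as the vl_context_create_buffer and XvMC hunks in this patch do):

static bool
screen_supports_npot_video(struct pipe_screen *screen)
{
   /* ask the driver's get_video_param hook directly on the screen */
   return screen->get_video_param(screen,
                                  PIPE_VIDEO_PROFILE_UNKNOWN,
                                  PIPE_VIDEO_CAP_NPOT_TEXTURES) != 0;
}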
@@ -55,12 +55,6 @@ struct pipe_video_context
     */
    void (*destroy)(struct pipe_video_context *context);
-   /**
-    * Query an integer-valued capability/parameter/limit
-    * \param param one of PIPE_CAP_x
-    */
-   int (*get_param)(struct pipe_video_context *context, int param);
    /**
     * Check if the given pipe_format is supported as a video buffer
     */
@@ -232,7 +232,9 @@ Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context, XvMCSubpicture *
    tex_templ.target = PIPE_TEXTURE_2D;
    tex_templ.format = XvIDToPipe(xvimage_id);
    tex_templ.last_level = 0;
-   if (vpipe->get_param(vpipe, PIPE_CAP_NPOT_TEXTURES)) {
+   if (vpipe->screen->get_video_param(vpipe->screen,
+                                      PIPE_VIDEO_PROFILE_UNKNOWN,
+                                      PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
       tex_templ.width0 = width;
       tex_templ.height0 = height;
    }