2015-09-25 00:20:57 +02:00
|
|
|
/*
|
|
|
|
* This file is part of mpv.
|
|
|
|
*
|
2016-01-07 10:46:15 +01:00
|
|
|
* mpv is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2015-09-25 00:20:57 +02:00
|
|
|
*
|
|
|
|
* mpv is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2016-01-07 10:46:15 +01:00
|
|
|
* GNU Lesser General Public License for more details.
|
2015-09-25 00:20:57 +02:00
|
|
|
*
|
2016-01-07 10:46:15 +01:00
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
|
2015-09-25 00:20:57 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <assert.h>
|
|
|
|
|
|
|
|
#include <EGL/egl.h>
|
2015-09-27 16:10:22 +02:00
|
|
|
#include <EGL/eglext.h>
|
2015-09-25 00:20:57 +02:00
|
|
|
|
|
|
|
#include <va/va_drmcommon.h>
|
|
|
|
|
|
|
|
#include "hwdec.h"
|
|
|
|
#include "video/vaapi.h"
|
|
|
|
#include "video/img_fourcc.h"
|
2015-09-26 20:15:52 +02:00
|
|
|
#include "video/mp_image_pool.h"
|
2015-09-25 00:20:57 +02:00
|
|
|
#include "common.h"
|
|
|
|
|
2015-09-27 16:25:03 +02:00
|
|
|
#ifndef GL_OES_EGL_image
|
|
|
|
typedef void* GLeglImageOES;
|
|
|
|
#endif
|
|
|
|
#ifndef EGL_KHR_image
|
|
|
|
typedef void *EGLImageKHR;
|
|
|
|
#endif
|
|
|
|
|
2015-10-23 15:56:17 +02:00
|
|
|
#ifndef EGL_LINUX_DMA_BUF_EXT
|
|
|
|
#define EGL_LINUX_DMA_BUF_EXT 0x3270
|
2015-09-27 16:25:03 +02:00
|
|
|
#define EGL_LINUX_DRM_FOURCC_EXT 0x3271
|
|
|
|
#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272
|
|
|
|
#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273
|
|
|
|
#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274
|
|
|
|
#endif
|
|
|
|
|
2015-09-27 20:09:10 +02:00
|
|
|
#if HAVE_VAAPI_X11
|
|
|
|
#include <va/va_x11.h>
|
|
|
|
|
|
|
|
// Obtain a VADisplay from the GL context's native X11 Display, if any.
static VADisplay *create_x11_va_display(GL *gl)
{
    Display *disp = gl->MPGetNativeDisplay("x11");
    if (!disp)
        return NULL;
    return vaGetDisplay(disp);
}
|
|
|
|
#endif
|
|
|
|
|
2015-09-27 21:24:35 +02:00
|
|
|
#if HAVE_VAAPI_WAYLAND
|
|
|
|
#include <va/va_wayland.h>
|
|
|
|
|
|
|
|
// Obtain a VADisplay from the GL context's native Wayland display, if any.
static VADisplay *create_wayland_va_display(GL *gl)
{
    struct wl_display *disp = gl->MPGetNativeDisplay("wl");
    if (!disp)
        return NULL;
    return vaGetDisplayWl(disp);
}
|
|
|
|
#endif
|
|
|
|
|
2016-01-20 19:41:29 +01:00
|
|
|
#if HAVE_VAAPI_DRM
|
|
|
|
#include <va/va_drm.h>
|
|
|
|
|
|
|
|
// Obtain a VADisplay from a DRM fd passed through the GL context.
static VADisplay *create_drm_va_display(GL *gl)
{
    int drm_fd = (intptr_t)gl->MPGetNativeDisplay("drm");
    // Note: yes, drm_fd==0 could be valid - but it's rare and doesn't fit with
    // our slightly crappy way of passing it through, so consider 0 not
    // valid.
    if (!drm_fd)
        return NULL;
    return vaGetDisplayDRM(drm_fd);
}
|
|
|
|
#endif
|
|
|
|
|
2016-01-21 13:32:29 +01:00
|
|
|
// A backend callback that attempts to create a VADisplay from the GL
// context's native windowing-system handle (X11, Wayland, or DRM).
struct va_create_native {
    VADisplay *(*create)(GL *gl);
};
|
|
|
|
|
|
|
|
// Table of platform backends, tried in order by create_native_va_display().
// Entries are compiled in depending on which VAAPI interops are available.
static const struct va_create_native create_native_cbs[] = {
#if HAVE_VAAPI_X11
    {create_x11_va_display},
#endif
#if HAVE_VAAPI_WAYLAND
    {create_wayland_va_display},
#endif
#if HAVE_VAAPI_DRM
    {create_drm_va_display},
#endif
};
|
|
|
|
|
|
|
|
// Probe every compiled-in backend and return the first VADisplay that can
// be created from the current GL context, or NULL if none succeeds.
static VADisplay *create_native_va_display(GL *gl)
{
    VADisplay *display = NULL;

    if (!gl->MPGetNativeDisplay)
        return NULL;

    for (int i = 0; i < MP_ARRAY_SIZE(create_native_cbs) && !display; i++)
        display = create_native_cbs[i].create(gl);

    return display;
}
|
|
|
|
|
2015-09-25 00:20:57 +02:00
|
|
|
struct priv {
    struct mp_log *log;
    struct mp_vaapi_ctx *ctx;
    VADisplay *display;
    GLuint gl_textures[4];      // one GL texture per image plane (up to 4)
    EGLImageKHR images[4];      // EGL images wrapping the dmabuf planes
    VAImage current_image;      // VAImage derived from the mapped surface
    bool buffer_acquired;       // true while vaAcquireBufferHandle() is held
    int current_mpfmt;          // mp image format negotiated in reinit()

    // EGL_KHR_image_base entry points (resolved via eglGetProcAddress)
    EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext,
                                              EGLenum, EGLClientBuffer,
                                              const EGLint *);
    EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR);
    // GL_OES_EGL_image entry point (binds an EGLImage to a GL texture)
    void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES);
};
|
|
|
|
|
2015-09-26 20:15:52 +02:00
|
|
|
static bool test_format(struct gl_hwdec *hw);
|
|
|
|
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
// Release all per-frame mapping state: destroy the per-plane EGL images,
// release the acquired VA buffer handle, and destroy the derived VAImage.
// Safe to call when nothing is currently mapped (all steps are guarded).
static void unmap_frame(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    VAStatus status;

    // EGL image destruction does not touch VAAPI state, so it happens
    // outside the VA lock.
    for (int n = 0; n < 4; n++) {
        if (p->images[n])
            p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]);
        p->images[n] = 0;
    }

    va_lock(p->ctx);

    // Release the buffer handle before destroying the image it belongs to.
    if (p->buffer_acquired) {
        status = vaReleaseBufferHandle(p->display, p->current_image.buf);
        CHECK_VA_STATUS(p, "vaReleaseBufferHandle()");
        p->buffer_acquired = false;
    }
    if (p->current_image.image_id != VA_INVALID_ID) {
        status = vaDestroyImage(p->display, p->current_image.image_id);
        CHECK_VA_STATUS(p, "vaDestroyImage()");
        p->current_image.image_id = VA_INVALID_ID;
    }

    va_unlock(p->ctx);
}
|
|
|
|
|
|
|
|
static void destroy_textures(struct gl_hwdec *hw)
|
|
|
|
{
|
|
|
|
struct priv *p = hw->priv;
|
|
|
|
GL *gl = hw->gl;
|
|
|
|
|
|
|
|
gl->DeleteTextures(4, p->gl_textures);
|
|
|
|
for (int n = 0; n < 4; n++)
|
|
|
|
p->gl_textures[n] = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tear down all interop state: unmap any mapped frame, delete the GL
// textures, unregister the hwdec device, and destroy the VAAPI context.
static void destroy(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    unmap_frame(hw);
    destroy_textures(hw);
    // p->ctx may be NULL if create() failed before va_initialize().
    if (p->ctx)
        hwdec_devices_remove(hw->devs, &p->ctx->hwctx);
    va_destroy(p->ctx);
}
|
|
|
|
|
|
|
|
// Probe and initialize the VAAPI/EGL interop. Returns 0 on success, -1 if
// any required EGL/GL extension, entry point, or native display is missing.
static int create(struct gl_hwdec *hw)
{
    GL *gl = hw->gl;

    struct priv *p = talloc_zero(hw, struct priv);
    hw->priv = p;
    p->current_image.buf = p->current_image.image_id = VA_INVALID_ID;
    p->log = hw->log;

    // Interop requires a current EGL context.
    if (!eglGetCurrentContext())
        return -1;

    const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS);
    if (!exts)
        return -1;

    // Required: dmabuf import, EGLImage, binding EGLImages to GL textures,
    // and RG textures (for the 2-component chroma plane of NV12).
    if (!strstr(exts, "EXT_image_dma_buf_import") ||
        !strstr(exts, "EGL_KHR_image_base") ||
        !strstr(gl->extensions, "GL_OES_EGL_image") ||
        !(gl->mpgl_caps & MPGL_CAP_TEX_RG))
        return -1;

    // EGL_KHR_image_base
    p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
    p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
    // GL_OES_EGL_image
    p->EGLImageTargetTexture2DOES =
        (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES");

    if (!p->CreateImageKHR || !p->DestroyImageKHR ||
        !p->EGLImageTargetTexture2DOES)
        return -1;

    p->display = create_native_va_display(gl);
    if (!p->display)
        return -1;

    p->ctx = va_initialize(p->display, p->log, true);
    if (!p->ctx) {
        vaTerminate(p->display);
        return -1;
    }

    // During auto-probing, reject emulated drivers (e.g. vdpau wrapper).
    if (hw->probing && va_guess_if_emulated(p->ctx)) {
        destroy(hw);
        return -1;
    }

    MP_VERBOSE(p, "using VAAPI EGL interop\n");

    // Mapping a test surface is the only reliable way to know whether EGL
    // interop actually works with this driver.
    if (!test_format(hw)) {
        destroy(hw);
        return -1;
    }

    p->ctx->hwctx.driver_name = hw->driver->name;
    hwdec_devices_add(hw->devs, &p->ctx->hwctx);
    return 0;
}
|
|
|
|
|
|
|
|
// (Re)configure the interop for a new stream: recreate the plane textures
// and validate the hardware surface sub-format. On success, rewrites
// params->imgfmt to the mapped software format. Returns 0 or -1.
static int reinit(struct gl_hwdec *hw, struct mp_image_params *params)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;

    // Recreate them to get rid of all previous image data (possibly).
    destroy_textures(hw);

    gl->GenTextures(4, p->gl_textures);
    for (int n = 0; n < 4; n++) {
        gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }
    gl->BindTexture(GL_TEXTURE_2D, 0);

    // The decoder reports the actual surface format via hw_subfmt; only
    // NV12 and 420P layouts are supported by this interop.
    p->current_mpfmt = params->hw_subfmt;
    if (p->current_mpfmt != IMGFMT_NV12 &&
        p->current_mpfmt != IMGFMT_420P)
    {
        MP_FATAL(p, "unsupported VA image format %s\n",
                 mp_imgfmt_to_name(p->current_mpfmt));
        return -1;
    }

    MP_VERBOSE(p, "hw format: %s\n", mp_imgfmt_to_name(p->current_mpfmt));

    // Report the mapped format to the renderer; hw_subfmt is consumed.
    params->imgfmt = p->current_mpfmt;
    params->hw_subfmt = 0;

    return 0;
}
|
|
|
|
|
|
|
|
// Append a (name, value) pair to the local EGL attribute array `attribs`,
// keeping the list EGL_NONE-terminated. Asserts there is room for the pair
// plus the terminator. Expects `attribs` and `num_attribs` in scope.
#define ADD_ATTRIB(name, value) \
    do { \
    assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \
    attribs[num_attribs++] = (name); \
    attribs[num_attribs++] = (value); \
    attribs[num_attribs] = EGL_NONE; \
    } while(0)
|
|
|
|
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
static int map_frame(struct gl_hwdec *hw, struct mp_image *hw_image,
|
|
|
|
struct gl_hwdec_frame *out_frame)
|
2015-09-25 00:20:57 +02:00
|
|
|
{
|
|
|
|
struct priv *p = hw->priv;
|
|
|
|
GL *gl = hw->gl;
|
|
|
|
VAStatus status;
|
|
|
|
VAImage *va_image = &p->current_image;
|
|
|
|
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
unmap_frame(hw);
|
2015-09-25 00:20:57 +02:00
|
|
|
|
|
|
|
va_lock(p->ctx);
|
|
|
|
|
|
|
|
status = vaDeriveImage(p->display, va_surface_id(hw_image), va_image);
|
|
|
|
if (!CHECK_VA_STATUS(p, "vaDeriveImage()"))
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc);
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
if (p->current_mpfmt != mpfmt) {
|
2015-09-26 20:15:52 +02:00
|
|
|
MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n",
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
mp_imgfmt_to_name(p->current_mpfmt), mp_imgfmt_to_name(mpfmt));
|
2015-09-26 20:15:52 +02:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2015-09-25 00:20:57 +02:00
|
|
|
VABufferInfo buffer_info = {.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME};
|
|
|
|
status = vaAcquireBufferHandle(p->display, va_image->buf, &buffer_info);
|
|
|
|
if (!CHECK_VA_STATUS(p, "vaAcquireBufferHandle()"))
|
|
|
|
goto err;
|
2015-09-25 12:14:19 +02:00
|
|
|
p->buffer_acquired = true;
|
2015-09-25 00:20:57 +02:00
|
|
|
|
2015-09-25 10:22:10 +02:00
|
|
|
struct mp_image layout = {0};
|
|
|
|
mp_image_set_params(&layout, &hw_image->params);
|
|
|
|
mp_image_setfmt(&layout, mpfmt);
|
2015-09-25 00:20:57 +02:00
|
|
|
|
|
|
|
// (it would be nice if we could use EGL_IMAGE_INTERNAL_FORMAT_EXT)
|
2015-09-25 10:55:02 +02:00
|
|
|
int drm_fmts[4] = {MP_FOURCC('R', '8', ' ', ' '), // DRM_FORMAT_R8
|
|
|
|
MP_FOURCC('G', 'R', '8', '8'), // DRM_FORMAT_GR88
|
|
|
|
MP_FOURCC('R', 'G', '2', '4'), // DRM_FORMAT_RGB888
|
|
|
|
MP_FOURCC('R', 'A', '2', '4')}; // DRM_FORMAT_RGBA8888
|
2015-09-25 00:20:57 +02:00
|
|
|
|
2015-09-25 10:22:10 +02:00
|
|
|
for (int n = 0; n < layout.num_planes; n++) {
|
|
|
|
int attribs[20] = {EGL_NONE};
|
2015-09-25 00:20:57 +02:00
|
|
|
int num_attribs = 0;
|
|
|
|
|
2015-09-25 10:22:10 +02:00
|
|
|
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmts[layout.fmt.bytes[n] - 1]);
|
|
|
|
ADD_ATTRIB(EGL_WIDTH, mp_image_plane_w(&layout, n));
|
|
|
|
ADD_ATTRIB(EGL_HEIGHT, mp_image_plane_h(&layout, n));
|
2015-09-25 00:20:57 +02:00
|
|
|
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle);
|
|
|
|
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
|
|
|
|
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);
|
|
|
|
|
2015-09-27 16:10:22 +02:00
|
|
|
p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
|
2015-09-25 00:20:57 +02:00
|
|
|
EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
|
|
|
|
if (!p->images[n])
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
|
2015-09-27 16:10:22 +02:00
|
|
|
p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);
|
2015-09-25 00:20:57 +02:00
|
|
|
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
out_frame->planes[n] = (struct gl_hwdec_plane){
|
|
|
|
.gl_texture = p->gl_textures[n],
|
|
|
|
.gl_target = GL_TEXTURE_2D,
|
|
|
|
.tex_w = mp_image_plane_w(&layout, n),
|
|
|
|
.tex_h = mp_image_plane_h(&layout, n),
|
|
|
|
};
|
2015-09-25 00:20:57 +02:00
|
|
|
}
|
|
|
|
gl->BindTexture(GL_TEXTURE_2D, 0);
|
|
|
|
|
2015-09-25 12:07:20 +02:00
|
|
|
if (va_image->format.fourcc == VA_FOURCC_YV12)
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
MPSWAP(struct gl_hwdec_plane, out_frame->planes[1], out_frame->planes[2]);
|
2015-09-25 12:07:20 +02:00
|
|
|
|
2015-09-25 00:20:57 +02:00
|
|
|
va_unlock(p->ctx);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
va_unlock(p->ctx);
|
|
|
|
MP_FATAL(p, "mapping VAAPI EGL image failed\n");
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
unmap_frame(hw);
|
2015-09-25 00:20:57 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-09-26 20:15:52 +02:00
|
|
|
static bool test_format(struct gl_hwdec *hw)
|
|
|
|
{
|
|
|
|
struct priv *p = hw->priv;
|
|
|
|
bool ok = false;
|
|
|
|
|
|
|
|
struct mp_image_pool *alloc = mp_image_pool_new(1);
|
|
|
|
va_pool_set_allocator(alloc, p->ctx, VA_RT_FORMAT_YUV420);
|
|
|
|
struct mp_image *surface = mp_image_pool_get(alloc, IMGFMT_VAAPI, 64, 64);
|
|
|
|
if (surface) {
|
vaapi: determine surface format in decoder, not in renderer
Until now, we have made the assumption that a driver will use only 1
hardware surface format. The format is dictated by the driver (you
don't create surfaces with a specific format - you just pass an
rt_format and get a surface that will be in a specific driver-chosen
format).
In particular, the renderer created a dummy surface to probe the format,
and hoped the decoder would produce the same format. Due to a driver
bug this required a workaround to actually get the same format as the
driver did.
Change this so that the format is determined in the decoder. The format
is then passed down as hw_subfmt, which allows the renderer to configure
itself with the correct format. If the hardware surface changes its
format midstream, the renderer can be reconfigured using the normal
mechanisms.
This calls va_surface_init_subformat() each time after the decoder
returns a surface. Since libavcodec/AVFrame has no concept of sub-
formats, this is unavoidable. It creates and destroys a derived
VAImage, but this shouldn't have any bad performance effects (at
least I didn't notice any measurable effects).
Note that vaDeriveImage() failures are silently ignored as some
drivers (the vdpau wrapper) support neither vaDeriveImage, nor EGL
interop. In addition, we still probe whether we can map an image
in the EGL interop code. This is important as it's the only way
to determine whether EGL interop is supported at all. With respect
to the driver bug mentioned above, it doesn't matter which format
the test surface has.
In vf_vavpp, also remove the rt_format guessing business. I think the
existing logic was a bit meaningless anyway. It's not even a given
that vavpp produces the same rt_format for output.
2016-04-11 20:46:05 +02:00
|
|
|
va_surface_init_subformat(surface);
|
2015-09-26 20:15:52 +02:00
|
|
|
struct mp_image_params params = surface->params;
|
|
|
|
if (reinit(hw, ¶ms) >= 0) {
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
struct gl_hwdec_frame frame = {0};
|
|
|
|
ok = map_frame(hw, surface, &frame) >= 0;
|
2015-09-26 20:15:52 +02:00
|
|
|
}
|
vo_opengl: refactor how hwdec interop exports textures
Rename gl_hwdec_driver.map_image to map_frame, and let it fill out a
struct gl_hwdec_frame describing the exact texture layout. This gives
more flexibility to what the hwdec interop can export. In particular, it
can export strange component orders/permutations and textures with
padded size. (The latter originating from cropped video.)
The way gl_hwdec_frame works is in the spirit of the rest of the
vo_opengl video processing code, which tends to put as much information
in immediate state (as part of the dataflow), instead of declaring it
globally. To some degree this duplicates the texplane and img_tex
structs, but until we somehow unify those, it's better to give the hwdec
state its own struct. The fact that changing the hwdec struct would
require changes and testing on at least 4 platform/GPU combinations
makes duplicating it almost a requirement to avoid pain later.
Make gl_hwdec_driver.reinit set the new image format and remove the
gl_hwdec.converted_imgfmt field.
Likewise, gl_hwdec.gl_texture_target is replaced with
gl_hwdec_plane.gl_target.
Split out a init_image_desc function from init_format. The latter is not
called in the hwdec case at all anymore. Setting up most of struct
texplane is also completely separate in the hwdec and normal cases.
video.c does not check whether the hwdec "mapped" image format is
supported. This should not really happen anyway, and if it does, the
hwdec interop backend must fail at creation time, so this is not an
issue.
2016-05-10 18:29:10 +02:00
|
|
|
unmap_frame(hw);
|
2015-09-26 20:15:52 +02:00
|
|
|
}
|
|
|
|
talloc_free(surface);
|
|
|
|
talloc_free(alloc);
|
|
|
|
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
|
2015-09-25 00:20:57 +02:00
|
|
|
// Driver descriptor for VAAPI hardware-decode interop via EGL dma-buf
// import. Accepts IMGFMT_VAAPI surfaces and exports them as GL textures
// through map_frame/unmap.
const struct gl_hwdec_driver gl_hwdec_vaegl = {
    .name = "vaapi-egl",
    .api = HWDEC_VAAPI,
    .imgfmt = IMGFMT_VAAPI,       // accepts VAAPI hardware surfaces
    .create = create,
    .reinit = reinit,             // reconfigure on format change
    .map_frame = map_frame,       // export surface planes as GL textures
    .unmap = unmap_frame,
    .destroy = destroy,
};
|