2009-02-16 21:58:13 +01:00
|
|
|
/*
|
|
|
|
* VDPAU video output driver
|
|
|
|
*
|
|
|
|
* Copyright (C) 2008 NVIDIA
|
2009-11-15 03:39:22 +01:00
|
|
|
* Copyright (C) 2009 Uoti Urpala
|
2009-02-16 21:58:13 +01:00
|
|
|
*
|
|
|
|
* This file is part of MPlayer.
|
|
|
|
*
|
|
|
|
* MPlayer is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* MPlayer is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*/
|
|
|
|
|
2009-05-09 17:08:30 +02:00
|
|
|
/*
|
2009-02-16 21:58:13 +01:00
|
|
|
* Actual decoding and presentation are implemented here.
|
|
|
|
* All necessary frame information is collected through
|
|
|
|
* the "vdpau_render_state" structure after parsing all headers
|
|
|
|
* etc. in libavcodec for different codecs.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdio.h>
|
2009-09-18 15:27:55 +02:00
|
|
|
#include <stdlib.h>
|
2009-05-06 22:49:51 +02:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdbool.h>
|
2009-11-15 03:39:22 +01:00
|
|
|
#include <limits.h>
|
2012-08-26 18:00:26 +02:00
|
|
|
#include <assert.h>
|
|
|
|
|
|
|
|
#include <libavutil/common.h>
|
|
|
|
#include <libavcodec/vdpau.h>
|
2009-02-16 21:58:13 +01:00
|
|
|
|
|
|
|
#include "config.h"
|
2012-11-09 01:06:43 +01:00
|
|
|
#include "core/mp_msg.h"
|
|
|
|
#include "core/options.h"
|
2009-05-06 20:04:37 +02:00
|
|
|
#include "talloc.h"
|
2012-11-09 01:06:43 +01:00
|
|
|
#include "vo.h"
|
2009-02-16 21:58:13 +01:00
|
|
|
#include "x11_common.h"
|
|
|
|
#include "aspect.h"
|
2012-11-09 01:06:43 +01:00
|
|
|
#include "video/csputils.h"
|
2011-01-26 18:40:52 +01:00
|
|
|
#include "sub/sub.h"
|
2012-11-09 01:06:43 +01:00
|
|
|
#include "core/m_option.h"
|
|
|
|
#include "video/vfcap.h"
|
|
|
|
#include "video/mp_image.h"
|
2009-09-07 01:02:24 +02:00
|
|
|
#include "osdep/timer.h"
|
2012-08-26 18:00:26 +02:00
|
|
|
#include "bitmap_packer.h"
|
2009-02-23 10:21:57 +01:00
|
|
|
|
2010-05-06 03:46:10 +02:00
|
|
|
// Modular addition without '%': computes (x + a) mod m for 0 <= x < m and
// |a| < m, where a may be negative.  Used for wrapping ring-buffer indices
// (e.g. surface queue positions).
#define WRAP_ADD(x, a, m) ((a) < 0 \
                           ? ((x)+(a)+(m) < (m) ? (x)+(a)+(m) : (x)+(a)) \
                           : ((x)+(a) < (m) ? (x)+(a) : (x)+(a)-(m)))
|
|
|
|
|
2009-02-16 21:58:13 +01:00
|
|
|
/* Check the result of the preceding VDPAU call: on failure, log the error
 * and return -1 from the ENCLOSING function (hidden control flow!).
 * Requires local variables 'vdp_st' (VdpStatus) and 'vdp'
 * (struct vdp_functions *) to be in scope. */
#define CHECK_ST_ERROR(message) \
    do { \
        if (vdp_st != VDP_STATUS_OK) { \
            mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] %s: %s\n", \
                   message, vdp->get_error_string(vdp_st)); \
            return -1; \
        } \
    } while (0)
|
2009-02-16 21:58:13 +01:00
|
|
|
|
|
|
|
/* Like CHECK_ST_ERROR, but only log a warning and continue; does not
 * return from the enclosing function.  Requires locals 'vdp_st' and 'vdp'.
 * Fixed log prefix: was "[ vdpau]" (stray space), now matches the
 * "[vdpau]" prefix used by every other message in this file. */
#define CHECK_ST_WARNING(message) \
    do { \
        if (vdp_st != VDP_STATUS_OK) \
            mp_msg(MSGT_VO, MSGL_WARN, "[vdpau] %s: %s\n", \
                   message, vdp->get_error_string(vdp_st)); \
    } while (0)
|
2009-02-16 21:58:13 +01:00
|
|
|
|
|
|
|
/* number of video and output surfaces */
|
2010-05-14 04:18:38 +02:00
|
|
|
#define MAX_OUTPUT_SURFACES 15
|
2009-02-16 21:58:13 +01:00
|
|
|
#define MAX_VIDEO_SURFACES 50
|
2011-12-04 17:10:17 +01:00
|
|
|
#define NUM_BUFFERED_VIDEO 5
|
2009-02-16 21:58:13 +01:00
|
|
|
|
2011-10-06 20:46:01 +02:00
|
|
|
/* Pixelformat used for output surfaces */
|
|
|
|
#define OUTPUT_RGBA_FORMAT VDP_RGBA_FORMAT_B8G8R8A8
|
|
|
|
|
2009-02-16 21:58:13 +01:00
|
|
|
/*
|
|
|
|
* Global variable declaration - VDPAU specific
|
|
|
|
*/
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
// Table of VDPAU entry points, resolved at runtime through
// vdp_get_proc_address() in win_x11_init_vdpau_procs().  The member list
// is generated by the X-macro in vdpau_template.c: each VDP_FUNCTION
// entry becomes a function pointer named mp_name of type vdp_type *.
struct vdp_functions {
#define VDP_FUNCTION(vdp_type, _, mp_name) vdp_type *mp_name;
#include "vdpau_template.c"
#undef VDP_FUNCTION
};
|
2009-02-28 14:20:01 +01:00
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
// Per-VO private state for the VDPAU output driver (vo->priv).
struct vdpctx {
    struct vdp_functions *vdp;  // resolved VDPAU entry points

    VdpDevice vdp_device;
    // Display preemption handling (set from preemption_callback, which
    // libvdpau may invoke asynchronously):
    bool is_preempted;
    bool preemption_acked;
    bool preemption_user_notified;
    unsigned int last_preemption_retry_fail;
    VdpGetProcAddress *vdp_get_proc_address;

    // Presentation queue used for timed display of output surfaces
    VdpPresentationQueueTarget flip_target;
    VdpPresentationQueue flip_queue;
    // Mapping between the VDPAU clock and the local GetTimer() clock;
    // maintained by change_vdptime_sync()/sync_vdptime()
    uint64_t last_vdp_time;
    unsigned int last_sync_update;

    VdpOutputSurface output_surfaces[MAX_OUTPUT_SURFACES];
    VdpOutputSurface screenshot_surface;
    int num_output_surfaces;
    // Small FIFO of recently decoded frames; index 0 is the newest.
    // Needed so the mixer can be given past/future fields for
    // deinterlacing (see render_video_to_output_surface()).
    struct buffered_video_surface {
        VdpVideoSurface surface;
        double pts;
        mp_image_t *mpi;  // reference keeping the surface's image alive
    } buffered_video[NUM_BUFFERED_VIDEO];
    // Position in the field queue; < 0 means "no frame available"
    // (see forget_frames() / next_deint_queue_pos())
    int deint_queue_pos;
    int output_surface_width, output_surface_height;

    VdpVideoMixer video_mixer;
    struct mp_csp_details colorspace;
    // Deinterlacing / postprocessing options:
    int deint;
    int deint_type;
    int deint_counter;
    int pullup;
    float denoise;
    float sharpen;
    int hqscaling;
    int chroma_deint;
    // Queue-ahead offsets (ms) for windowed vs fullscreen display
    int flip_offset_window;
    int flip_offset_fs;
    int top_field_first;
    bool flip;  // mirror vertically (swaps src rect y0/y1 in resize())

    VdpDecoder decoder;
    int decoder_max_refs;

    VdpRect src_rect_vid;
    VdpRect out_rect_vid;
    struct mp_osd_res osd_rect;

    struct vdpau_render_state surface_render[MAX_VIDEO_SURFACES];
    bool surface_in_use[MAX_VIDEO_SURFACES];
    int surface_num; // indexes output_surfaces
    int query_surface_num;
    // Frame timing state
    VdpTime recent_vsync_time;
    float user_fps;
    int composite_detect;
    unsigned int vsync_interval;
    uint64_t last_queue_time;
    uint64_t queue_time[MAX_OUTPUT_SURFACES];
    uint64_t last_ideal_time;
    bool dropped_frame;
    uint64_t dropped_time;
    // Properties of the current video stream
    uint32_t vid_width, vid_height;
    uint32_t image_format;
    VdpChromaType vdp_chroma_type;
    VdpYCbCrFormat vdp_pixel_format;

    // OSD
    struct osd_bitmap_surface {
        VdpRGBAFormat format;
        VdpBitmapSurface surface;
        uint32_t max_width;
        uint32_t max_height;
        struct bitmap_packer *packer;
        // List of surfaces to be rendered
        struct osd_target {
            VdpRect source;
            VdpRect dest;
            VdpColor color;
        } *targets;
        int targets_size;
        int render_count;
        // Change counters used to detect when the cached OSD bitmaps
        // must be re-uploaded/re-positioned
        int bitmap_id;
        int bitmap_pos_id;
    } osd_surfaces[MAX_OSD_PARTS];

    // Video equalizer
    struct mp_csp_equalizer video_eq;
};
|
2009-02-16 21:58:13 +01:00
|
|
|
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
static bool status_ok(struct vo *vo);
|
|
|
|
|
2009-11-15 03:39:22 +01:00
|
|
|
// Resynchronize the mapping between the local GetTimer() clock
// (presumably microseconds -- the *1000 scaling below implies it; confirm)
// and the VDPAU presentation-queue clock (VdpTime, nanoseconds).
// On entry *t is the caller's current GetTimer() value; on return it is
// updated to the GetTimer() value taken right after the VDPAU query.
// Returns 0 on success, -1 on VDPAU error (via CHECK_ST_ERROR).
static int change_vdptime_sync(struct vdpctx *vc, unsigned int *t)
{
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    VdpTime vdp_time;
    vdp_st = vdp->presentation_queue_get_time(vc->flip_queue, &vdp_time);
    CHECK_ST_ERROR("Error when calling vdp_presentation_queue_get_time");
    unsigned int t1 = *t;
    unsigned int t2 = GetTimer();
    // VDPAU time predicted from the previous sync point
    uint64_t old = vc->last_vdp_time + (t1 - vc->last_sync_update) * 1000ULL;
    if (vdp_time > old) {
        // The query itself took t2 - t1 local ticks; don't misattribute
        // that latency to clock drift -- clamp the adjustment.
        if (vdp_time > old + (t2 - t1) * 1000ULL)
            vdp_time -= (t2 - t1) * 1000ULL;
        else
            vdp_time = old;
    }
    mp_msg(MSGT_VO, MSGL_DBG2, "[vdpau] adjusting VdpTime offset by %f µs\n",
           (int64_t)(vdp_time - old) / 1000.);
    vc->last_vdp_time = vdp_time;
    vc->last_sync_update = t1;
    *t = t2;
    return 0;
}
|
|
|
|
|
|
|
|
static uint64_t sync_vdptime(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
|
|
|
|
unsigned int t = GetTimer();
|
|
|
|
if (t - vc->last_sync_update > 5000000)
|
|
|
|
change_vdptime_sync(vc, &t);
|
|
|
|
uint64_t now = (t - vc->last_sync_update) * 1000ULL + vc->last_vdp_time;
|
|
|
|
// Make sure nanosecond inaccuracies don't make things inconsistent
|
|
|
|
now = FFMAX(now, vc->recent_vsync_time);
|
|
|
|
return now;
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t convert_to_vdptime(struct vo *vo, unsigned int t)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
return (int)(t - vc->last_sync_update) * 1000LL + vc->last_vdp_time;
|
|
|
|
}
|
|
|
|
|
2011-10-06 20:46:01 +02:00
|
|
|
// Render the frame/field selected by vc->deint_queue_pos into
// output_surface at output_rect via the video mixer, supplying past and
// future fields from the buffered_video queue for deinterlacing.
// Returns -1 if no frame is queued, 0 otherwise (mixer errors only warn).
static int render_video_to_output_surface(struct vo *vo,
                                          VdpOutputSurface output_surface,
                                          VdpRect *output_rect)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpTime dummy;
    VdpStatus vdp_st;
    if (vc->deint_queue_pos < 0)
        return -1;

    struct buffered_video_surface *bv = vc->buffered_video;
    int field = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME;
    unsigned int dp = vc->deint_queue_pos;
    // dp==0 means last field of latest frame, 1 earlier field of latest frame,
    // 2 last field of previous frame and so on
    if (vc->deint) {
        // Field parity is derived from queue position and TFF flag
        field = vc->top_field_first ^ (dp & 1) ?
                VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD:
                VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD;
    }
    // dp/2 maps a field position to its frame index in bv[]; the mixer
    // gets two past and one future surface relative to the current one.
    const VdpVideoSurface *past_fields = (const VdpVideoSurface []){
        bv[(dp+1)/2].surface, bv[(dp+2)/2].surface};
    const VdpVideoSurface *future_fields = (const VdpVideoSurface []){
        dp >= 1 ? bv[(dp-1)/2].surface : VDP_INVALID_HANDLE};
    // Wait until the target surface is no longer being scanned out
    vdp_st = vdp->presentation_queue_block_until_surface_idle(vc->flip_queue,
                                                              output_surface,
                                                              &dummy);
    CHECK_ST_WARNING("Error when calling "
                     "vdp_presentation_queue_block_until_surface_idle");

    vdp_st = vdp->video_mixer_render(vc->video_mixer, VDP_INVALID_HANDLE,
                                     0, field, 2, past_fields,
                                     bv[dp/2].surface, 1, future_fields,
                                     &vc->src_rect_vid, output_surface,
                                     NULL, output_rect, 0, NULL);
    CHECK_ST_WARNING("Error when calling vdp_video_mixer_render");
    return 0;
}
|
2009-03-26 00:32:27 +01:00
|
|
|
|
2011-10-06 20:46:01 +02:00
|
|
|
static int video_to_output_surface(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
|
|
|
|
return render_video_to_output_surface(vo,
|
|
|
|
vc->output_surfaces[vc->surface_num],
|
|
|
|
&vc->out_rect_vid);
|
|
|
|
}
|
|
|
|
|
2011-12-04 17:10:17 +01:00
|
|
|
// Compute the deint queue position of the next frame/field to display,
// or -1 if nothing further is available.  With eof set, the remaining
// queued entries may be drained; otherwise enough history must stay
// buffered for the mixer's past/future fields.
static int next_deint_queue_pos(struct vo *vo, bool eof)
{
    struct vdpctx *vc = vo->priv;

    int pos = vc->deint_queue_pos;
    if (pos < 0) {
        // Negative encodes "queue reset"; shift back into valid range
        pos += 1000;
    } else if (vc->deint >= 2) {
        // Field-splitting deinterlace: advance one field
        pos = pos - 1;
    } else {
        // Whole frames: skip to the previous frame's odd field slot
        pos = (pos - 2) | 1;
    }
    int minimum = eof ? 0 : 3;
    return pos < minimum ? -1 : pos;
}
|
|
|
|
|
|
|
|
// Update vo->frame_loaded / vo->next_pts / vo->next_pts2 from the state
// of the buffered video queue.  next_pts2 is the pts of the entry after
// next, used by the core for frame-duration estimation.  For
// field-splitting deinterlace, an intermediate pts halfway between two
// frames is synthesized for the second field.
static void set_next_frame_info(struct vo *vo, bool eof)
{
    struct vdpctx *vc = vo->priv;

    vo->frame_loaded = false;
    int dqp = next_deint_queue_pos(vo, eof);
    if (dqp < 0)
        return;
    vo->frame_loaded = true;

    // Set pts values
    struct buffered_video_surface *bv = vc->buffered_video;
    int idx = dqp >> 1;  // queue field position -> frame index
    if (idx == 0) { // no future frame/pts available
        vo->next_pts = bv[0].pts;
        vo->next_pts2 = MP_NOPTS_VALUE;
    } else if (!(vc->deint >= 2)) { // no field-splitting deinterlace
        vo->next_pts = bv[idx].pts;
        vo->next_pts2 = bv[idx - 1].pts;
    } else { // deinterlace with separate fields
        double intermediate_pts;
        double diff = bv[idx - 1].pts - bv[idx].pts;
        // Only interpolate a mid-frame pts for plausible frame durations
        // (0..0.5 s); otherwise reuse the frame's own pts.
        if (diff > 0 && diff < 0.5)
            intermediate_pts = (bv[idx].pts + bv[idx - 1].pts) / 2;
        else
            intermediate_pts = bv[idx].pts;
        if (dqp & 1) { // first field
            vo->next_pts = bv[idx].pts;
            vo->next_pts2 = intermediate_pts;
        } else {
            vo->next_pts = intermediate_pts;
            vo->next_pts2 = bv[idx - 1].pts;
        }
    }
}
|
|
|
|
|
2009-10-14 03:12:10 +02:00
|
|
|
// Push a newly decoded surface onto the front of the buffered video
// queue, dropping the image reference held by the oldest entry, then
// advance the deint queue position and refresh next-frame info.
// Takes its own reference on reserved_mpi (if any).
static void add_new_video_surface(struct vo *vo, VdpVideoSurface surface,
                                  struct mp_image *reserved_mpi, double pts)
{
    struct vdpctx *vc = vo->priv;
    struct buffered_video_surface *bv = vc->buffered_video;

    // Oldest entry falls off the end; release its image reference first
    mp_image_unrefp(&bv[NUM_BUFFERED_VIDEO - 1].mpi);

    for (int i = NUM_BUFFERED_VIDEO - 1; i > 0; i--)
        bv[i] = bv[i - 1];
    bv[0] = (struct buffered_video_surface){
        .mpi = reserved_mpi ? mp_image_new_ref(reserved_mpi) : NULL,
        .surface = surface,
        .pts = pts,
    };

    // Each new frame shifts existing fields two positions back; cap at
    // the last position for which past fields are still buffered.
    vc->deint_queue_pos = FFMIN(vc->deint_queue_pos + 2,
                                NUM_BUFFERED_VIDEO * 2 - 3);
    set_next_frame_info(vo, false);
}
|
|
|
|
|
2009-09-18 15:27:55 +02:00
|
|
|
static void forget_frames(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
|
2009-10-14 03:12:10 +02:00
|
|
|
vc->deint_queue_pos = -1001;
|
2009-11-15 03:39:22 +01:00
|
|
|
vc->dropped_frame = false;
|
2009-10-14 03:12:10 +02:00
|
|
|
for (int i = 0; i < NUM_BUFFERED_VIDEO; i++) {
|
|
|
|
struct buffered_video_surface *p = vc->buffered_video + i;
|
video/filter: change filter API, use refcounting, remove filter DR
Change the entire filter API to use reference counted images instead
of vf_get_image().
Remove filter "direct rendering". This was useful for vf_expand and (in
rare cases) vf_sub: DR allowed these filters to pass a cropped image to
the filters before them. Then, on filtering, the image was "uncropped",
so that black bars could be added around the image without copying. This
means that in some cases, vf_expand will be slower (-vf gradfun,expand
for example).
Note that another form of DR used for in-place filters has been replaced
by simpler logic. Instead of trying to do DR, filters can check if the
image is writeable (with mp_image_is_writeable()), and do true in-place
if that's the case. This affects filters like vf_gradfun and vf_sub.
Everything has to support strides now. If something doesn't, making a
copy of the image data is required.
2012-11-05 14:25:04 +01:00
|
|
|
mp_image_unrefp(&p->mpi);
|
2009-10-14 03:12:10 +02:00
|
|
|
*p = (struct buffered_video_surface){
|
|
|
|
.surface = VDP_INVALID_HANDLE,
|
|
|
|
};
|
2009-02-24 22:46:25 +01:00
|
|
|
}
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
// Recompute source/destination rectangles after a window size change and
// grow the output surfaces if the window became larger than they are.
// Surfaces are grown geometrically (x1.5, min = window size) so frequent
// interactive resizes don't recreate them on every step; they are never
// shrunk here.
static void resize(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    struct mp_rect src_rect;
    struct mp_rect dst_rect;
    vo_get_src_dst_rects(vo, &src_rect, &dst_rect, &vc->osd_rect);
    vc->out_rect_vid.x0 = dst_rect.x0;
    vc->out_rect_vid.x1 = dst_rect.x1;
    vc->out_rect_vid.y0 = dst_rect.y0;
    vc->out_rect_vid.y1 = dst_rect.y1;
    // Vertical flip is implemented by swapping the source rect's y edges
    vc->src_rect_vid.x0 = src_rect.x0;
    vc->src_rect_vid.x1 = src_rect.x1;
    vc->src_rect_vid.y0 = vc->flip ? src_rect.y1 : src_rect.y0;
    vc->src_rect_vid.y1 = vc->flip ? src_rect.y0 : src_rect.y1;

    int flip_offset_ms = vo_fs ? vc->flip_offset_fs : vc->flip_offset_window;
    vo->flip_queue_offset = flip_offset_ms / 1000.;

    if (vc->output_surface_width < vo->dwidth
        || vc->output_surface_height < vo->dheight) {
        if (vc->output_surface_width < vo->dwidth) {
            vc->output_surface_width += vc->output_surface_width >> 1;
            vc->output_surface_width = FFMAX(vc->output_surface_width,
                                             vo->dwidth);
        }
        if (vc->output_surface_height < vo->dheight) {
            vc->output_surface_height += vc->output_surface_height >> 1;
            vc->output_surface_height = FFMAX(vc->output_surface_height,
                                              vo->dheight);
        }
        // Creation of output_surfaces
        for (int i = 0; i < vc->num_output_surfaces; i++)
            if (vc->output_surfaces[i] != VDP_INVALID_HANDLE) {
                vdp_st = vdp->output_surface_destroy(vc->output_surfaces[i]);
                CHECK_ST_WARNING("Error when calling "
                                 "vdp_output_surface_destroy");
            }
        for (int i = 0; i < vc->num_output_surfaces; i++) {
            vdp_st = vdp->output_surface_create(vc->vdp_device,
                                                OUTPUT_RGBA_FORMAT,
                                                vc->output_surface_width,
                                                vc->output_surface_height,
                                                &vc->output_surfaces[i]);
            CHECK_ST_WARNING("Error when calling vdp_output_surface_create");
            mp_msg(MSGT_VO, MSGL_DBG2, "vdpau out create: %u\n",
                   vc->output_surfaces[i]);
        }
    }
    vo->want_redraw = true;
}
|
|
|
|
|
2009-09-07 01:02:24 +02:00
|
|
|
// Registered with vdp->preemption_callback_register(); invoked by
// libvdpau when the device is preempted (e.g. VT switch).  Only records
// the event; actual recovery happens later on the rendering path.
// Assignment order is kept as-is since the callback may run
// asynchronously relative to the code reading these flags.
static void preemption_callback(VdpDevice device, void *context)
{
    struct vdpctx *ctx = context;
    ctx->is_preempted = true;
    ctx->preemption_acked = false;
}
|
|
|
|
|
2009-02-16 21:58:13 +01:00
|
|
|
/* Initialize vdp_get_proc_address, called from preinit() */
|
2009-05-04 02:09:50 +02:00
|
|
|
/* Initialize vdp_get_proc_address, called from preinit() */
// Creates the VDPAU device for the X11 display and resolves every entry
// point listed in vdpau_template.c into vc->vdp via offsetof-based
// writes.  Also registers the preemption callback.  Safe to call again
// after display preemption (the function table is then zeroed and
// refilled).  Returns 0 on success, -1 on any failure.
static int win_x11_init_vdpau_procs(struct vo *vo)
{
    struct vo_x11_state *x11 = vo->x11;
    struct vdpctx *vc = vo->priv;
    if (vc->vdp) // reinitialization after preemption
        memset(vc->vdp, 0, sizeof(*vc->vdp));
    else
        vc->vdp = talloc_zero(vc, struct vdp_functions);
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;

    // Maps a VDPAU function id to the byte offset of the corresponding
    // pointer inside struct vdp_functions.
    struct vdp_function {
        const int id;
        int offset;
    };

    const struct vdp_function *dsc;

    // Table generated from the same X-macro list as struct vdp_functions,
    // terminated by a sentinel with offset -1.
    static const struct vdp_function vdp_func[] = {
#define VDP_FUNCTION(_, macro_name, mp_name) {macro_name, offsetof(struct vdp_functions, mp_name)},
#include "vdpau_template.c"
#undef VDP_FUNCTION
        {0, -1}
    };

    vdp_st = vdp_device_create_x11(x11->display, x11->screen, &vc->vdp_device,
                                   &vc->vdp_get_proc_address);
    if (vdp_st != VDP_STATUS_OK) {
        // During preemption recovery a failure here is expected and only
        // worth a debug message; otherwise it's a real error.
        if (vc->is_preempted)
            mp_msg(MSGT_VO, MSGL_DBG2, "[vdpau] Error calling "
                   "vdp_device_create_x11 while preempted: %d\n", vdp_st);
        else
            mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] Error when calling "
                   "vdp_device_create_x11: %d\n", vdp_st);
        return -1;
    }

    // get_error_string may not be resolved yet when an early lookup
    // fails, hence the NULL guard in the error message below.
    vdp->get_error_string = NULL;
    for (dsc = vdp_func; dsc->offset >= 0; dsc++) {
        vdp_st = vc->vdp_get_proc_address(vc->vdp_device, dsc->id,
                                          (void **)((char *)vdp + dsc->offset));
        if (vdp_st != VDP_STATUS_OK) {
            mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] Error when calling "
                   "vdp_get_proc_address(function id %d): %s\n", dsc->id,
                   vdp->get_error_string ? vdp->get_error_string(vdp_st) : "?");
            return -1;
        }
    }
    vdp_st = vdp->preemption_callback_register(vc->vdp_device,
                                               preemption_callback, vc);
    return 0;
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
/* Create (or re-create after preemption) the presentation queue used to
 * display output surfaces, then initialize the vsync timing state.
 * Returns 0 on success, -1 on failure (via CHECK_ST_ERROR or the quiet
 * preemption path). */
static int win_x11_init_vdpau_flip_queue(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    struct vo_x11_state *x11 = vo->x11;
    VdpStatus vdp_st;

    if (vc->flip_target == VDP_INVALID_HANDLE) {
        vdp_st = vdp->presentation_queue_target_create_x11(vc->vdp_device,
                                                           x11->window,
                                                           &vc->flip_target);
        CHECK_ST_ERROR("Error when calling "
                       "vdp_presentation_queue_target_create_x11");
    }

    /* Empirically this seems to be the first call which fails when we
     * try to reinit after preemption while the user is still switched
     * from X to a virtual terminal (creating the vdp_device initially
     * succeeds, as does creating the flip_target above). This is
     * probably not guaranteed behavior, but we'll assume it as a simple
     * way to reduce warnings while trying to recover from preemption.
     */
    if (vc->flip_queue == VDP_INVALID_HANDLE) {
        vdp_st = vdp->presentation_queue_create(vc->vdp_device, vc->flip_target,
                                                &vc->flip_queue);
        if (vc->is_preempted && vdp_st != VDP_STATUS_OK) {
            // Expected while preempted — log at debug level only.
            mp_msg(MSGT_VO, MSGL_DBG2, "[vdpau] Failed to create flip queue "
                   "while preempted: %s\n", vdp->get_error_string(vdp_st));
            return -1;
        } else
            CHECK_ST_ERROR("Error when calling vdp_presentation_queue_create");
    }

    // Synchronize our clock with the presentation queue's timestamp source
    // so frame scheduling can convert between the two time bases.
    VdpTime vdp_time;
    vdp_st = vdp->presentation_queue_get_time(vc->flip_queue, &vdp_time);
    CHECK_ST_ERROR("Error when calling vdp_presentation_queue_get_time");
    vc->last_vdp_time = vdp_time;
    vc->last_sync_update = GetTimer();

    // vsync_interval is in nanoseconds; 1 effectively disables
    // vsync-aware scheduling until a real refresh rate is known.
    vc->vsync_interval = 1;
    if (vc->composite_detect && vo_x11_screen_is_composited(vo)) {
        mp_msg(MSGT_VO, MSGL_INFO, "[vdpau] Compositing window manager "
               "detected. Assuming timing info is inaccurate.\n");
    } else if (vc->user_fps > 0) {
        vc->vsync_interval = 1e9 / vc->user_fps;
        mp_msg(MSGT_VO, MSGL_INFO, "[vdpau] Assuming user-specified display "
               "refresh rate of %.3f Hz.\n", vc->user_fps);
    } else if (vc->user_fps == 0) {
        // fps == 0 means "autodetect"; negative disables timing logic below.
#ifdef CONFIG_XF86VM
        double fps = vo_x11_vm_get_fps(vo);
        if (!fps)
            mp_msg(MSGT_VO, MSGL_WARN, "[vdpau] Failed to get display FPS\n");
        else {
            vc->vsync_interval = 1e9 / fps;
            // This is verbose, but I'm not yet sure how common wrong values are
            mp_msg(MSGT_VO, MSGL_INFO,
                   "[vdpau] Got display refresh rate %.3f Hz.\n"
                   "[vdpau] If that value looks wrong give the "
                   "-vo vdpau:fps=X suboption manually.\n", fps);
        }
#else
        mp_msg(MSGT_VO, MSGL_INFO, "[vdpau] This binary has been compiled "
               "without XF86VidMode support.\n");
        mp_msg(MSGT_VO, MSGL_INFO, "[vdpau] Can't use vsync-aware timing "
               "without manually provided -vo vdpau:fps=X suboption.\n");
#endif
    } else
        mp_msg(MSGT_VO, MSGL_V, "[vdpau] framedrop/timing logic disabled by "
               "user.\n");

    return 0;
}
|
|
|
|
|
2009-11-17 05:18:14 +01:00
|
|
|
/* Set a single attribute on the video mixer.
 * attr_name is used only for the error message.
 * Returns 0 on success, -1 on failure (failure is logged, not fatal). */
static int set_video_attribute(struct vdpctx *vc, VdpVideoMixerAttribute attr,
                               const void *value, char *attr_name)
{
    struct vdp_functions *vdp = vc->vdp;

    VdpStatus status = vdp->video_mixer_set_attribute_values(vc->video_mixer,
                                                             1, &attr, &value);
    if (status == VDP_STATUS_OK)
        return 0;

    mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] Error setting video mixer "
           "attribute %s: %s\n", attr_name, vdp->get_error_string(status));
    return -1;
}
|
|
|
|
|
2009-11-15 14:21:40 +01:00
|
|
|
/* Recompute the YUV->RGB conversion matrix from the current colorspace
 * and equalizer settings and upload it to the video mixer. Called after
 * mixer creation and whenever colorspace/equalizer parameters change. */
static void update_csc_matrix(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;

    mp_msg(MSGT_VO, MSGL_V, "[vdpau] Updating CSC matrix\n");

    // VdpCSCMatrix happens to be compatible with mplayer's CSC matrix type
    // both are float[3][4]
    VdpCSCMatrix matrix;

    struct mp_csp_params cparams = {
        .colorspace = vc->colorspace, .input_bits = 8, .texture_bits = 8 };
    mp_csp_copy_equalizer_values(&cparams, &vc->video_eq);
    mp_get_yuv2rgb_coeffs(&cparams, matrix);

    // Failure is logged inside set_video_attribute(); nothing to do here.
    set_video_attribute(vc, VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX,
                        &matrix, "CSC matrix");
}
|
|
|
|
|
2009-11-17 05:18:14 +01:00
|
|
|
/* Helper macro: set mixer attribute VDP_VIDEO_MIXER_ATTRIBUTE_<attr_name>
 * from a compound literal of the given type; stringizes attr_name for the
 * error message. Requires a local `vc` in scope. */
#define SET_VIDEO_ATTR(attr_name, attr_type, value) set_video_attribute(vc, \
                 VDP_VIDEO_MIXER_ATTRIBUTE_ ## attr_name, &(attr_type){value},\
                 # attr_name)
/* Create the video mixer with the feature set implied by the user's
 * deinterlacing/postprocessing options, then enable/disable features and
 * set their attribute values. No-op if a mixer already exists.
 * Returns 0 on success, -1 on failure (via CHECK_ST_ERROR). */
static int create_vdp_mixer(struct vo *vo, VdpChromaType vdp_chroma_type)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
#define VDP_NUM_MIXER_PARAMETER 3
#define MAX_NUM_FEATURES 6
    int i;
    VdpStatus vdp_st;

    if (vc->video_mixer != VDP_INVALID_HANDLE)
        return 0;

    int feature_count = 0;
    VdpVideoMixerFeature features[MAX_NUM_FEATURES];
    VdpBool feature_enables[MAX_NUM_FEATURES];
    static const VdpVideoMixerParameter parameters[VDP_NUM_MIXER_PARAMETER] = {
        VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH,
        VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT,
        VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE,
    };
    const void *const parameter_values[VDP_NUM_MIXER_PARAMETER] = {
        &vc->vid_width,
        &vc->vid_height,
        &vdp_chroma_type,
    };
    // NOTE: the enable logic below relies on push order — temporal deint
    // is always features[0], and temporal-spatial (if requested) is [1].
    features[feature_count++] = VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL;
    if (vc->deint_type == 4)
        features[feature_count++] =
            VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL;
    if (vc->pullup)
        features[feature_count++] = VDP_VIDEO_MIXER_FEATURE_INVERSE_TELECINE;
    if (vc->denoise)
        features[feature_count++] = VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION;
    if (vc->sharpen)
        features[feature_count++] = VDP_VIDEO_MIXER_FEATURE_SHARPNESS;
    if (vc->hqscaling) {
        // hqscaling is 1-based; L1..Ln are consecutive feature IDs.
        VdpVideoMixerFeature hqscaling_feature =
            VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1 + vc->hqscaling-1;
        VdpBool hqscaling_available;
        vdp_st = vdp->video_mixer_query_feature_support(vc->vdp_device,
                                                        hqscaling_feature,
                                                        &hqscaling_available);
        CHECK_ST_ERROR("Error when calling video_mixer_query_feature_support");
        if (hqscaling_available)
            features[feature_count++] = hqscaling_feature;
        else
            mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] Your hardware or VDPAU "
                   "library does not support requested hqscaling.\n");
    }

    vdp_st = vdp->video_mixer_create(vc->vdp_device, feature_count, features,
                                     VDP_NUM_MIXER_PARAMETER,
                                     parameters, parameter_values,
                                     &vc->video_mixer);
    CHECK_ST_ERROR("Error when calling vdp_video_mixer_create");

    // Features must be requested at creation but can be toggled at runtime;
    // start with everything on, then turn off what the deint level forbids.
    for (i = 0; i < feature_count; i++)
        feature_enables[i] = VDP_TRUE;
    if (vc->deint < 3)
        feature_enables[0] = VDP_FALSE;  // temporal deint off
    if (vc->deint_type == 4 && vc->deint < 4)
        feature_enables[1] = VDP_FALSE;  // temporal-spatial deint off
    if (feature_count) {
        vdp_st = vdp->video_mixer_set_feature_enables(vc->video_mixer,
                                                      feature_count, features,
                                                      feature_enables);
        CHECK_ST_WARNING("Error calling vdp_video_mixer_set_feature_enables");
    }
    if (vc->denoise)
        SET_VIDEO_ATTR(NOISE_REDUCTION_LEVEL, float, vc->denoise);
    if (vc->sharpen)
        SET_VIDEO_ATTR(SHARPNESS_LEVEL, float, vc->sharpen);
    if (!vc->chroma_deint)
        SET_VIDEO_ATTR(SKIP_CHROMA_DEINTERLACE, uint8_t, 1);

    update_csc_matrix(vo);
    return 0;
}
|
|
|
|
|
|
|
|
// Free everything specific to a certain video file
|
2009-05-06 20:04:37 +02:00
|
|
|
static void free_video_specific(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
struct vdp_functions *vdp = vc->vdp;
|
2009-02-16 21:58:13 +01:00
|
|
|
int i;
|
|
|
|
VdpStatus vdp_st;
|
|
|
|
|
2009-05-06 22:42:24 +02:00
|
|
|
if (vc->decoder != VDP_INVALID_HANDLE)
|
|
|
|
vdp->decoder_destroy(vc->decoder);
|
|
|
|
vc->decoder = VDP_INVALID_HANDLE;
|
|
|
|
vc->decoder_max_refs = -1;
|
2009-02-16 21:58:13 +01:00
|
|
|
|
2009-10-14 03:12:10 +02:00
|
|
|
forget_frames(vo);
|
2009-03-18 18:02:29 +01:00
|
|
|
|
2009-02-16 21:58:13 +01:00
|
|
|
for (i = 0; i < MAX_VIDEO_SURFACES; i++) {
|
2009-05-06 22:42:24 +02:00
|
|
|
if (vc->surface_render[i].surface != VDP_INVALID_HANDLE) {
|
2009-05-09 17:08:30 +02:00
|
|
|
vdp_st = vdp->video_surface_destroy(vc->surface_render[i].surface);
|
|
|
|
CHECK_ST_WARNING("Error when calling vdp_video_surface_destroy");
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
2009-05-06 22:42:24 +02:00
|
|
|
vc->surface_render[i].surface = VDP_INVALID_HANDLE;
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
|
2009-05-06 22:42:24 +02:00
|
|
|
if (vc->video_mixer != VDP_INVALID_HANDLE) {
|
|
|
|
vdp_st = vdp->video_mixer_destroy(vc->video_mixer);
|
2009-05-08 19:57:22 +02:00
|
|
|
CHECK_ST_WARNING("Error when calling vdp_video_mixer_destroy");
|
2009-02-23 10:22:57 +01:00
|
|
|
}
|
2009-05-06 22:42:24 +02:00
|
|
|
vc->video_mixer = VDP_INVALID_HANDLE;
|
2012-10-21 00:48:01 +02:00
|
|
|
|
|
|
|
if (vc->screenshot_surface != VDP_INVALID_HANDLE) {
|
|
|
|
vdp_st = vdp->output_surface_destroy(vc->screenshot_surface);
|
|
|
|
CHECK_ST_WARNING("Error when calling vdp_output_surface_destroy");
|
|
|
|
}
|
|
|
|
vc->screenshot_surface = VDP_INVALID_HANDLE;
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
/* (Re)create the VDPAU hardware decoder for the current image format,
 * sized for max_refs reference frames. Any existing decoder is destroyed
 * first. Returns 1 on success, 0 on failure (callers treat 0 as
 * "hardware decoding unavailable", not as a fatal error). */
static int create_vdp_decoder(struct vo *vo, int max_refs)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    VdpDecoderProfile vdp_decoder_profile;
    if (vc->decoder != VDP_INVALID_HANDLE)
        vdp->decoder_destroy(vc->decoder);
    switch (vc->image_format) {
    case IMGFMT_VDPAU_MPEG1:
        vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG1;
        break;
    case IMGFMT_VDPAU_MPEG2:
        vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
        break;
    case IMGFMT_VDPAU_H264:
        vdp_decoder_profile = VDP_DECODER_PROFILE_H264_HIGH;
        mp_msg(MSGT_VO, MSGL_V, "[vdpau] Creating H264 hardware decoder "
               "for %d reference frames.\n", max_refs);
        break;
    case IMGFMT_VDPAU_WMV3:
        vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_MAIN;
        break;
    case IMGFMT_VDPAU_VC1:
        vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_ADVANCED;
        break;
    case IMGFMT_VDPAU_MPEG4:
        vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG4_PART2_ASP;
        break;
    default:
        mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] Unknown image format!\n");
        goto fail;  // jumps into the failure branch below
    }
    vdp_st = vdp->decoder_create(vc->vdp_device, vdp_decoder_profile,
                                 vc->vid_width, vc->vid_height, max_refs,
                                 &vc->decoder);
    CHECK_ST_WARNING("Failed creating VDPAU decoder");
    if (vdp_st != VDP_STATUS_OK) {
        // NOTE: the label sits inside this if-body so the unknown-format
        // goto above shares the same cleanup path.
    fail:
        vc->decoder = VDP_INVALID_HANDLE;
        vc->decoder_max_refs = 0;
        return 0;
    }
    vc->decoder_max_refs = max_refs;
    return 1;
}
|
|
|
|
|
2009-10-22 03:21:14 +02:00
|
|
|
/* Set up all VDPAU objects that depend on the video format: choose the
 * pixel format / chroma type for the current image format, then create
 * the flip queue and the video mixer. Used both for initial config and
 * for recovery after preemption. Returns 0 on success, -1 on failure. */
static int initialize_vdpau_objects(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;

    // 4:2:0 is the default; the 4:2:2 packed formats override it below.
    vc->vdp_chroma_type = VDP_CHROMA_TYPE_420;
    switch (vc->image_format) {
    case IMGFMT_420P:
        vc->vdp_pixel_format = VDP_YCBCR_FORMAT_YV12;
        break;
    case IMGFMT_NV12:
        vc->vdp_pixel_format = VDP_YCBCR_FORMAT_NV12;
        break;
    case IMGFMT_YUYV:
        vc->vdp_pixel_format = VDP_YCBCR_FORMAT_YUYV;
        vc->vdp_chroma_type = VDP_CHROMA_TYPE_422;
        break;
    case IMGFMT_UYVY:
        vc->vdp_pixel_format = VDP_YCBCR_FORMAT_UYVY;
        vc->vdp_chroma_type = VDP_CHROMA_TYPE_422;
        // last case, no break needed; other formats (hardware-decoded
        // IMGFMT_VDPAU_*) leave vdp_pixel_format unset intentionally
    }
    if (win_x11_init_vdpau_flip_queue(vo) < 0)
        return -1;

    if (create_vdp_mixer(vo, vc->vdp_chroma_type) < 0)
        return -1;

    forget_frames(vo);
    resize(vo);
    return 0;
}
|
|
|
|
|
|
|
|
static void mark_vdpau_objects_uninitialized(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
|
|
|
|
vc->decoder = VDP_INVALID_HANDLE;
|
|
|
|
for (int i = 0; i < MAX_VIDEO_SURFACES; i++)
|
|
|
|
vc->surface_render[i].surface = VDP_INVALID_HANDLE;
|
2009-09-18 15:27:55 +02:00
|
|
|
forget_frames(vo);
|
2009-09-07 01:02:24 +02:00
|
|
|
vc->video_mixer = VDP_INVALID_HANDLE;
|
|
|
|
vc->flip_queue = VDP_INVALID_HANDLE;
|
|
|
|
vc->flip_target = VDP_INVALID_HANDLE;
|
2012-08-26 18:00:26 +02:00
|
|
|
for (int i = 0; i < MAX_OUTPUT_SURFACES; i++)
|
2009-09-07 01:02:24 +02:00
|
|
|
vc->output_surfaces[i] = VDP_INVALID_HANDLE;
|
2012-10-21 00:48:01 +02:00
|
|
|
vc->screenshot_surface = VDP_INVALID_HANDLE;
|
2009-09-07 01:02:24 +02:00
|
|
|
vc->vdp_device = VDP_INVALID_HANDLE;
|
2012-09-28 21:49:09 +02:00
|
|
|
for (int i = 0; i < MAX_OSD_PARTS; i++) {
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
struct osd_bitmap_surface *sfc = &vc->osd_surfaces[i];
|
2012-09-28 21:49:09 +02:00
|
|
|
talloc_free(sfc->packer);
|
|
|
|
sfc->bitmap_id = sfc->bitmap_pos_id = 0;
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
*sfc = (struct osd_bitmap_surface){
|
2012-09-28 21:49:09 +02:00
|
|
|
.surface = VDP_INVALID_HANDLE,
|
|
|
|
};
|
|
|
|
}
|
2009-09-07 01:02:24 +02:00
|
|
|
vc->output_surface_width = vc->output_surface_height = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Deal with display preemption (e.g. VT switch): acknowledge it, notify
 * the user once, and periodically attempt to reinitialize VDPAU.
 *
 * Returns 0 if not preempted, -1 if preempted and recovery failed (or was
 * skipped due to rate limiting), 1 if recovery succeeded this call.
 */
static int handle_preemption(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;

    if (!vc->is_preempted)
        return 0;
    // Drop all stale handles exactly once per preemption event.
    if (!vc->preemption_acked)
        mark_vdpau_objects_uninitialized(vo);
    vc->preemption_acked = true;
    if (!vc->preemption_user_notified) {
        mp_tmsg(MSGT_VO, MSGL_ERR, "[vdpau] Got display preemption notice! "
                "Will attempt to recover.\n");
        vc->preemption_user_notified = true;
    }
    /* Trying to initialize seems to be quite slow, so only try once a
     * second to avoid using 100% CPU. */
    if (vc->last_preemption_retry_fail
        && GetTimerMS() - vc->last_preemption_retry_fail < 1000)
        return -1;
    if (win_x11_init_vdpau_procs(vo) < 0 || initialize_vdpau_objects(vo) < 0) {
        // "| 1" keeps the timestamp nonzero, so it can't be confused with
        // the 0 used as the "no failed retry yet" sentinel above.
        vc->last_preemption_retry_fail = GetTimerMS() | 1;
        return -1;
    }
    vc->last_preemption_retry_fail = 0;
    vc->is_preempted = false;
    vc->preemption_user_notified = false;
    mp_tmsg(MSGT_VO, MSGL_INFO, "[vdpau] Recovered from display preemption.\n");
    return 1;
}
|
|
|
|
|
2009-02-16 21:58:13 +01:00
|
|
|
/*
 * connect to X server, create and map window, initialize all
 * VDPAU objects, create different surfaces etc.
 */
/*
 * VO configuration entry point: (re)configure for a new video size/format.
 * Returns 0 on success, -1 on failure (preempted and unrecovered, or
 * decoder/object creation failed).
 */
static int config(struct vo *vo, uint32_t width, uint32_t height,
                  uint32_t d_width, uint32_t d_height, uint32_t flags,
                  uint32_t format)
{
    struct vdpctx *vc = vo->priv;

    if (handle_preemption(vo) < 0)
        return -1;

    vc->flip = flags & VOFLAG_FLIPPING;
    vc->image_format = format;
    vc->vid_width    = width;
    vc->vid_height   = height;

    // Destroy surfaces/decoder sized for the previous video before
    // creating new ones.
    free_video_specific(vo);
    // Hardware-decoded formats need a decoder up front; 2 reference
    // frames is the initial guess (grown later in decoder_render).
    if (IMGFMT_IS_VDPAU(vc->image_format) && !create_vdp_decoder(vo, 2))
        return -1;

    vo_x11_create_vo_window(vo, NULL, vo->dx, vo->dy, d_width, d_height,
                            flags, "vdpau");

    if (initialize_vdpau_objects(vo) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
static void check_events(struct vo *vo)
|
2009-02-16 21:58:13 +01:00
|
|
|
{
|
2009-09-07 01:02:24 +02:00
|
|
|
if (handle_preemption(vo) < 0)
|
|
|
|
return;
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
int e = vo_x11_check_events(vo);
|
2009-02-16 21:58:13 +01:00
|
|
|
|
|
|
|
if (e & VO_EVENT_RESIZE)
|
2009-05-04 02:09:50 +02:00
|
|
|
resize(vo);
|
2011-12-05 05:36:20 +01:00
|
|
|
else if (e & VO_EVENT_EXPOSE) {
|
|
|
|
vo->want_redraw = true;
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-26 18:00:26 +02:00
|
|
|
/*
 * Allocate a bitmap packer (talloc'd under vo) whose maximum dimensions
 * are set from the implementation's bitmap-surface capability limits for
 * the given RGBA format.  On query failure a warning is logged and the
 * limits stay 0.
 */
static struct bitmap_packer *make_packer(struct vo *vo, VdpRGBAFormat format)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;

    struct bitmap_packer *packer = talloc_zero(vo, struct bitmap_packer);
    uint32_t w_max = 0, h_max = 0;
    // The "is_supported" VdpBool out-parameter is intentionally discarded
    // via a throwaway compound literal.
    VdpStatus vdp_st = vdp->
        bitmap_surface_query_capabilities(vc->vdp_device, format,
                                          &(VdpBool){0}, &w_max, &h_max);
    CHECK_ST_WARNING("Query to get max OSD surface size failed");
    packer->w_max = w_max;
    packer->h_max = h_max;
    return packer;
}
|
|
|
|
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
/*
 * Blend the cached bitmap rectangles of OSD part 'index' onto the current
 * output surface.  B8G8R8A8 parts use premultiplied-alpha blending; other
 * formats use straight source-alpha "over" blending.
 */
static void draw_osd_part(struct vo *vo, int index)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    struct osd_bitmap_surface *sfc = &vc->osd_surfaces[index];
    VdpOutputSurface output_surface = vc->output_surfaces[vc->surface_num];
    int i;

    // Standard "over" compositing for straight (non-premultiplied) alpha.
    VdpOutputSurfaceRenderBlendState blend_state = {
        .struct_version = VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION,
        .blend_factor_source_color =
            VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
        .blend_factor_source_alpha =
            VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE,
        .blend_factor_destination_color =
            VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
        .blend_factor_destination_alpha =
            VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_SRC_ALPHA,
        .blend_equation_color = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
        .blend_equation_alpha = VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD,
    };

    // Same, but source color is already multiplied by its alpha.
    VdpOutputSurfaceRenderBlendState blend_state_premultiplied = blend_state;
    blend_state_premultiplied.blend_factor_source_color =
        VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE;

    for (i = 0; i < sfc->render_count; i++) {
        VdpOutputSurfaceRenderBlendState *blend = &blend_state;
        if (sfc->format == VDP_RGBA_FORMAT_B8G8R8A8)
            blend = &blend_state_premultiplied;
        vdp_st = vdp->
            output_surface_render_bitmap_surface(output_surface,
                                                 &sfc->targets[i].dest,
                                                 sfc->surface,
                                                 &sfc->targets[i].source,
                                                 &sfc->targets[i].color,
                                                 blend,
                                                 VDP_OUTPUT_SURFACE_RENDER_ROTATE_0);
        CHECK_ST_WARNING("OSD: Error when rendering");
    }
}
|
|
|
|
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
/*
 * Upload the sub-bitmaps in 'imgs' into the cache surface of OSD part
 * imgs->render_index and rebuild its render-target list.
 *
 * Caching levels:
 *  - bitmap_pos_id unchanged: nothing at all to do;
 *  - bitmap_id unchanged (positions moved only): rebuild targets but skip
 *    the pixel upload (osd_skip_upload);
 *  - otherwise: (re)pack, possibly (re)create the surface, and upload.
 */
static void generate_osd_part(struct vo *vo, struct sub_bitmaps *imgs)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    struct osd_bitmap_surface *sfc = &vc->osd_surfaces[imgs->render_index];
    bool need_upload = false;

    if (imgs->bitmap_pos_id == sfc->bitmap_pos_id)
        return; // Nothing changed and we still have the old data

    sfc->render_count = 0;

    if (imgs->format == SUBBITMAP_EMPTY || imgs->num_parts == 0)
        return;

    if (imgs->bitmap_id == sfc->bitmap_id)
        goto osd_skip_upload;

    need_upload = true;
    VdpRGBAFormat format;
    int format_size;
    switch (imgs->format) {
    case SUBBITMAP_LIBASS:
        // libass bitmaps are alpha-only; color comes per-target below.
        format = VDP_RGBA_FORMAT_A8;
        format_size = 1;
        break;
    case SUBBITMAP_RGBA:
        format = VDP_RGBA_FORMAT_B8G8R8A8;
        format_size = 4;
        break;
    default:
        abort();
    };
    // A format change invalidates the packer (its w_max/h_max are
    // format-specific); a fresh one is created just below.
    if (sfc->format != format) {
        talloc_free(sfc->packer);
        sfc->packer = NULL;
    };
    sfc->format = format;
    if (!sfc->packer)
        sfc->packer = make_packer(vo, format);
    sfc->packer->padding = imgs->scaled; // assume 2x2 filter on scaling
    int r = packer_pack_from_subbitmaps(sfc->packer, imgs);
    if (r < 0) {
        mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] OSD bitmaps do not fit on "
               "a surface with the maximum supported size\n");
        return;
    } else if (r == 1) {
        // Packer grew: drop the old surface (if any) and allocate one
        // matching the new packed dimensions.
        if (sfc->surface != VDP_INVALID_HANDLE) {
            vdp_st = vdp->bitmap_surface_destroy(sfc->surface);
            CHECK_ST_WARNING("Error when calling vdp_bitmap_surface_destroy");
        }
        mp_msg(MSGT_VO, MSGL_V, "[vdpau] Allocating a %dx%d surface for "
               "OSD bitmaps.\n", sfc->packer->w, sfc->packer->h);
        vdp_st = vdp->bitmap_surface_create(vc->vdp_device, format,
                                            sfc->packer->w, sfc->packer->h,
                                            true, &sfc->surface);
        if (vdp_st != VDP_STATUS_OK)
            sfc->surface = VDP_INVALID_HANDLE;
        CHECK_ST_WARNING("OSD: error when creating surface");
    }
    if (imgs->scaled) {
        // Zero the used area so scaling filters don't sample stale
        // pixels at packed-bitmap edges (matches the padding above).
        char zeros[sfc->packer->used_width * format_size];
        memset(zeros, 0, sizeof(zeros));
        vdp_st = vdp->bitmap_surface_put_bits_native(sfc->surface,
                &(const void *){zeros}, &(uint32_t){0},
                &(VdpRect){0, 0, sfc->packer->used_width,
                                 sfc->packer->used_height});
    }

osd_skip_upload:
    if (sfc->surface == VDP_INVALID_HANDLE)
        return;
    // Grow (never shrink) the render-target array to fit all parts.
    if (sfc->packer->count > sfc->targets_size) {
        talloc_free(sfc->targets);
        sfc->targets_size = sfc->packer->count;
        sfc->targets = talloc_size(vc, sfc->targets_size
                                       * sizeof(*sfc->targets));
    }

    for (int i = 0 ;i < sfc->packer->count; i++) {
        struct sub_bitmap *b = &imgs->parts[i];
        struct osd_target *target = sfc->targets + sfc->render_count;
        int x = sfc->packer->result[i].x;
        int y = sfc->packer->result[i].y;
        target->source = (VdpRect){x, y, x + b->w, y + b->h};
        target->dest = (VdpRect){b->x, b->y, b->x + b->dw, b->y + b->dh};
        target->color = (VdpColor){1, 1, 1, 1};
        if (imgs->format == SUBBITMAP_LIBASS) {
            // libass packs RGBA into a uint32 with alpha inverted
            // (0 = opaque) in the low byte.
            uint32_t color = b->libass.color;
            target->color.alpha = 1.0 - ((color >> 0) & 0xff) / 255.0;
            target->color.blue  = ((color >> 8) & 0xff) / 255.0;
            target->color.green = ((color >> 16) & 0xff) / 255.0;
            target->color.red   = ((color >> 24) & 0xff) / 255.0;
        }
        if (need_upload) {
            vdp_st = vdp->
                bitmap_surface_put_bits_native(sfc->surface,
                                               &(const void *){b->bitmap},
                                               &(uint32_t){b->stride},
                                               &target->source);
            CHECK_ST_WARNING("OSD: putbits failed");
        }
        sfc->render_count++;
    }

    sfc->bitmap_id = imgs->bitmap_id;
    sfc->bitmap_pos_id = imgs->bitmap_pos_id;
}
|
2011-10-27 12:07:10 +02:00
|
|
|
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
/*
 * osd_draw() callback: cache/upload one OSD part's bitmaps, then blend
 * that part onto the current output surface.  'ctx' is the struct vo.
 */
static void draw_osd_cb(void *ctx, struct sub_bitmaps *imgs)
{
    struct vo *vo = ctx;
    generate_osd_part(vo, imgs);
    draw_osd_part(vo, imgs->render_index);
}
|
|
|
|
|
VO, sub: refactor
Remove VFCTRL_DRAW_OSD, VFCAP_EOSD_FILTER, VFCAP_EOSD_RGBA, VFCAP_EOSD,
VOCTRL_DRAW_EOSD, VOCTRL_GET_EOSD_RES, VOCTRL_QUERY_EOSD_FORMAT.
Remove draw_osd_with_eosd(), which rendered the OSD by calling
VOCTRL_DRAW_EOSD. Change VOs to call osd_draw() directly, which takes
a callback as argument. (This basically works like the old OSD API,
except multiple OSD bitmap formats are supported and caching is
possible.)
Remove all mentions of "eosd". It's simply "osd" now.
Make OSD size per-OSD-object, as they can be different when using
vf_sub. Include display_par/video_par in resolution change detection.
Fix the issue with margin borders in vo_corevideo.
2012-10-19 19:25:18 +02:00
|
|
|
/*
 * Render all OSD/subtitle layers for the current frame.  Advertises the
 * sub-bitmap formats this VO can consume and delegates per-part rendering
 * to draw_osd_cb via osd_draw().  No-op while VDPAU is not usable.
 */
static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct vdpctx *vc = vo->priv;

    if (!status_ok(vo))
        return;

    // Formats accepted by generate_osd_part (index = SUBBITMAP_* enum).
    static const bool formats[SUBBITMAP_COUNT] = {
        [SUBBITMAP_LIBASS] = true,
        [SUBBITMAP_RGBA] = true,
    };

    osd_draw(osd, vc->osd_rect, osd->vo_pts, 0, formats, draw_osd_cb, vo);
}
|
|
|
|
|
2010-05-14 04:18:38 +02:00
|
|
|
/*
 * Poll the presentation queue: advance query_surface_num past every
 * surface that has already been shown, recording the latest vsync time
 * in vc->recent_vsync_time and logging early/late frames when running
 * with vsync_interval > 1.
 *
 * Returns the number of surfaces still queued (flipped but not shown).
 */
static int update_presentation_queue_status(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;

    while (vc->query_surface_num != vc->surface_num) {
        VdpTime vtime;
        VdpPresentationQueueStatus status;
        VdpOutputSurface surface = vc->output_surfaces[vc->query_surface_num];
        vdp_st = vdp->presentation_queue_query_surface_status(vc->flip_queue,
                                                              surface,
                                                              &status, &vtime);
        CHECK_ST_WARNING("Error calling "
                         "presentation_queue_query_surface_status");
        // Surfaces are displayed in order; the first still-queued one
        // means everything after it is queued too.
        if (status == VDP_PRESENTATION_QUEUE_STATUS_QUEUED)
            break;
        if (vc->vsync_interval > 1) {
            uint64_t qtime = vc->queue_time[vc->query_surface_num];
            if (vtime < qtime + vc->vsync_interval / 2)
                mp_msg(MSGT_VO, MSGL_V, "[vdpau] Frame shown too early\n");
            if (vtime > qtime + vc->vsync_interval)
                mp_msg(MSGT_VO, MSGL_V, "[vdpau] Frame shown late\n");
        }
        vc->query_surface_num = WRAP_ADD(vc->query_surface_num, 1,
                                         vc->num_output_surfaces);
        vc->recent_vsync_time = vtime;
    }
    // Distance (mod ring size) between flip head and query tail.
    int num_queued = WRAP_ADD(vc->surface_num, -vc->query_surface_num,
                              vc->num_output_surfaces);
    mp_msg(MSGT_VO, MSGL_DBG3, "[vdpau] Queued surface count (before add): "
           "%d\n", num_queued);
    return num_queued;
}
|
|
|
|
|
|
|
|
static inline uint64_t prev_vs2(struct vdpctx *vc, uint64_t ts, int shift)
|
|
|
|
{
|
|
|
|
uint64_t offset = ts - vc->recent_vsync_time;
|
|
|
|
// Fix negative values for 1<<shift vsyncs before vc->recent_vsync_time
|
|
|
|
offset += (uint64_t)vc->vsync_interval << shift;
|
|
|
|
offset %= vc->vsync_interval;
|
|
|
|
return ts - offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Queue the current output surface for display at (approximately) pts_us
 * microseconds, snapping to vsync boundaries and implementing the frame
 * drop logic.  'duration' is the frame duration in ms (converted to the
 * internal microsecond scale below); a frame is dropped by returning
 * before presentation_queue_display.  pts_us == 0 means "now".
 */
static void flip_page_timed(struct vo *vo, unsigned int pts_us, int duration)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    VdpStatus vdp_st;
    uint32_t vsync_interval = vc->vsync_interval;

    if (handle_preemption(vo) < 0)
        return;

    // Convert ms -> us; overflow-prone or unknown durations disable
    // dropping (negative means "unknown").
    if (duration > INT_MAX / 1000)
        duration = -1;
    else
        duration *= 1000;

    if (vc->vsync_interval == 1)
        duration = -1;  // Make sure drop logic is disabled

    uint64_t now = sync_vdptime(vo);
    uint64_t pts = pts_us ? convert_to_vdptime(vo, pts_us) : now;
    uint64_t ideal_pts = pts;
    uint64_t npts = duration >= 0 ? pts + duration : UINT64_MAX;

#define PREV_VS2(ts, shift) prev_vs2(vc, ts, shift)
    // Only gives accurate results for ts >= vc->recent_vsync_time
#define PREV_VSYNC(ts) PREV_VS2(ts, 0)

    /* We hope to be here at least one vsync before the frame should be shown.
     * If we are running late then don't drop the frame unless there is
     * already one queued for the next vsync; even if we _hope_ to show the
     * next frame soon enough to mean this one should be dropped we might
     * not make the target time in reality. Without this check we could drop
     * every frame, freezing the display completely if video lags behind.
     */
    if (now > PREV_VSYNC(FFMAX(pts, vc->last_queue_time + vsync_interval)))
        npts = UINT64_MAX;

    /* Allow flipping a frame at a vsync if its presentation time is a
     * bit after that vsync and the change makes the flip time delta
     * from previous frame better match the target timestamp delta.
     * This avoids instability with frame timestamps falling near vsyncs.
     * For example if the frame timestamps were (with vsyncs at
     * integer values) 0.01, 1.99, 4.01, 5.99, 8.01, ... then
     * straightforward timing at next vsync would flip the frames at
     * 1, 2, 5, 6, 9; this changes it to 1, 2, 4, 6, 8 and so on with
     * regular 2-vsync intervals.
     *
     * Also allow moving the frame forward if it looks like we dropped
     * the previous frame incorrectly (now that we know better after
     * having final exact timestamp information for this frame) and
     * there would unnecessarily be a vsync without a frame change.
     */
    uint64_t vsync = PREV_VSYNC(pts);
    if (pts < vsync + vsync_interval / 4
        && (vsync - PREV_VS2(vc->last_queue_time, 16)
            > pts - vc->last_ideal_time + vsync_interval / 2
            || vc->dropped_frame && vsync > vc->dropped_time))
        pts -= vsync_interval / 2;

    vc->dropped_frame = true; // changed at end if false
    vc->dropped_time = ideal_pts;

    // Never target earlier than one vsync after the previous flip, nor
    // the past.
    pts = FFMAX(pts, vc->last_queue_time + vsync_interval);
    pts = FFMAX(pts, now);
    // Drop if the *next* frame is already due before this one's vsync.
    if (npts < PREV_VSYNC(pts) + vsync_interval)
        return;

    // Refine the target using the number of flips still pending in the
    // presentation queue, then re-check the drop condition.
    int num_flips = update_presentation_queue_status(vo);
    vsync = vc->recent_vsync_time + num_flips * vc->vsync_interval;
    now = sync_vdptime(vo);
    pts = FFMAX(pts, now);
    pts = FFMAX(pts, vsync + (vsync_interval >> 2));
    vsync = PREV_VSYNC(pts);
    if (npts < vsync + vsync_interval)
        return;
    // Aim a quarter interval past the vsync to be safely inside it.
    pts = vsync + (vsync_interval >> 2);
    vdp_st =
        vdp->presentation_queue_display(vc->flip_queue,
                                        vc->output_surfaces[vc->surface_num],
                                        vo->dwidth, vo->dheight, pts);
    CHECK_ST_WARNING("Error when calling vdp_presentation_queue_display");

    vc->last_queue_time = pts;
    vc->queue_time[vc->surface_num] = pts;
    vc->last_ideal_time = ideal_pts;
    vc->dropped_frame = false;
    vc->surface_num = WRAP_ADD(vc->surface_num, 1, vc->num_output_surfaces);
}
|
|
|
|
|
2012-11-04 17:17:11 +01:00
|
|
|
static int decoder_render(struct vo *vo, void *state_ptr)
|
2009-02-16 21:58:13 +01:00
|
|
|
{
|
2009-05-06 20:04:37 +02:00
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
struct vdp_functions *vdp = vc->vdp;
|
2009-02-16 21:58:13 +01:00
|
|
|
VdpStatus vdp_st;
|
2012-11-04 17:17:11 +01:00
|
|
|
struct vdpau_render_state *rndr = (struct vdpau_render_state *)state_ptr;
|
2009-09-07 01:02:24 +02:00
|
|
|
|
|
|
|
if (handle_preemption(vo) < 0)
|
|
|
|
return VO_TRUE;
|
|
|
|
|
2009-05-09 17:08:30 +02:00
|
|
|
int max_refs = vc->image_format == IMGFMT_VDPAU_H264 ?
|
|
|
|
rndr->info.h264.num_ref_frames : 2;
|
2009-05-06 22:42:24 +02:00
|
|
|
if (!IMGFMT_IS_VDPAU(vc->image_format))
|
2009-02-16 21:58:13 +01:00
|
|
|
return VO_FALSE;
|
2009-05-06 22:42:24 +02:00
|
|
|
if ((vc->decoder == VDP_INVALID_HANDLE || vc->decoder_max_refs < max_refs)
|
2009-05-06 20:04:37 +02:00
|
|
|
&& !create_vdp_decoder(vo, max_refs))
|
2009-03-21 18:10:19 +01:00
|
|
|
return VO_FALSE;
|
2009-07-07 01:26:13 +02:00
|
|
|
|
2009-05-09 17:08:30 +02:00
|
|
|
vdp_st = vdp->decoder_render(vc->decoder, rndr->surface,
|
|
|
|
(void *)&rndr->info,
|
|
|
|
rndr->bitstream_buffers_used,
|
|
|
|
rndr->bitstream_buffers);
|
2009-02-16 21:58:13 +01:00
|
|
|
CHECK_ST_WARNING("Failed VDPAU decoder rendering");
|
|
|
|
return VO_TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
static struct vdpau_render_state *get_surface(struct vo *vo, int number)
|
2009-02-16 21:58:13 +01:00
|
|
|
{
|
2009-05-06 20:04:37 +02:00
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
struct vdp_functions *vdp = vc->vdp;
|
|
|
|
|
2012-10-30 17:42:25 +01:00
|
|
|
if (number >= MAX_VIDEO_SURFACES)
|
2009-02-16 21:58:13 +01:00
|
|
|
return NULL;
|
2009-09-07 01:02:24 +02:00
|
|
|
if (vc->surface_render[number].surface == VDP_INVALID_HANDLE
|
|
|
|
&& !vc->is_preempted) {
|
2009-02-16 21:58:13 +01:00
|
|
|
VdpStatus vdp_st;
|
2009-05-06 22:42:24 +02:00
|
|
|
vdp_st = vdp->video_surface_create(vc->vdp_device, vc->vdp_chroma_type,
|
2009-05-09 17:08:30 +02:00
|
|
|
vc->vid_width, vc->vid_height,
|
|
|
|
&vc->surface_render[number].surface);
|
2009-05-08 19:57:22 +02:00
|
|
|
CHECK_ST_WARNING("Error when calling vdp_video_surface_create");
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
2011-07-04 23:48:41 +02:00
|
|
|
mp_msg(MSGT_VO, MSGL_DBG3, "vdpau vid create: %u\n",
|
2009-05-09 17:08:30 +02:00
|
|
|
vc->surface_render[number].surface);
|
2009-05-06 22:42:24 +02:00
|
|
|
return &vc->surface_render[number];
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
|
video/filter: change filter API, use refcounting, remove filter DR
Change the entire filter API to use reference counted images instead
of vf_get_image().
Remove filter "direct rendering". This was useful for vf_expand and (in
rare cases) vf_sub: DR allowed these filters to pass a cropped image to
the filters before them. Then, on filtering, the image was "uncropped",
so that black bars could be added around the image without copying. This
means that in some cases, vf_expand will be slower (-vf gradfun,expand
for example).
Note that another form of DR used for in-place filters has been replaced
by simpler logic. Instead of trying to do DR, filters can check if the
image is writeable (with mp_image_is_writeable()), and do true in-place
if that's the case. This affects filters like vf_gradfun and vf_sub.
Everything has to support strides now. If something doesn't, making a
copy of the image data is required.
2012-11-05 14:25:04 +01:00
|
|
|
/*
 * VO entry point: accept one decoded frame.
 *
 * For VDPAU hardware formats, mpi->planes[0] already carries the
 * vdpau_render_state whose surface was filled by decoder_render(), and
 * the image reference is kept alive by passing it on. For software
 * formats the pixel data is uploaded into one of the round-robin
 * buffered video surfaces. Either way the resulting surface is handed
 * to add_new_video_surface() for display.
 */
static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;
    struct mp_image *reserved_mpi = NULL;
    struct vdpau_render_state *rndr;

    if (IMGFMT_IS_VDPAU(vc->image_format)) {
        // Hardware-decoded: the render state rides along in planes[0].
        rndr = (struct vdpau_render_state *)mpi->planes[0];
        reserved_mpi = mpi;
    } else {
        // Software path: cycle through the buffered upload surfaces.
        rndr = get_surface(vo, vc->deint_counter);
        vc->deint_counter = WRAP_ADD(vc->deint_counter, 1, NUM_BUFFERED_VIDEO);
        // Skip the upload while VDPAU is preempted; the surface handle
        // would be invalid.
        if (handle_preemption(vo) >= 0) {
            VdpStatus vdp_st;
            // NOTE(review): planes[1]/planes[2] are swapped relative to
            // the mpi layout here — presumably to match the plane order
            // vdp_pixel_format expects; confirm against the format setup.
            const void *destdata[3] = {mpi->planes[0], mpi->planes[2],
                                       mpi->planes[1]};
            if (vc->image_format == IMGFMT_NV12)
                destdata[1] = destdata[2];
            vdp_st = vdp->video_surface_put_bits_y_cb_cr(rndr->surface,
                    vc->vdp_pixel_format, destdata, mpi->stride);
            CHECK_ST_WARNING("Error when calling "
                             "vdp_video_surface_put_bits_y_cb_cr");
        }
    }
    // Remember the field order for deinterlacing; default to
    // top-field-first when the frame does not say.
    if (mpi->fields & MP_IMGFIELD_ORDERED)
        vc->top_field_first = !!(mpi->fields & MP_IMGFIELD_TOP_FIRST);
    else
        vc->top_field_first = 1;

    add_new_video_surface(vo, rndr->surface, reserved_mpi, mpi->pts);

    return;
}
|
|
|
|
|
2011-10-06 20:46:01 +02:00
|
|
|
// warning: the size and pixel format of surface must match that of the
|
|
|
|
// surfaces in vc->output_surfaces
|
|
|
|
static struct mp_image *read_output_surface(struct vdpctx *vc,
|
2012-10-21 00:48:01 +02:00
|
|
|
VdpOutputSurface surface,
|
|
|
|
int width, int height)
|
2011-10-06 20:46:01 +02:00
|
|
|
{
|
|
|
|
VdpStatus vdp_st;
|
|
|
|
struct vdp_functions *vdp = vc->vdp;
|
2012-12-22 21:46:22 +01:00
|
|
|
struct mp_image *image = mp_image_alloc(IMGFMT_BGR32, width, height);
|
2012-10-26 19:29:47 +02:00
|
|
|
image->colorspace = MP_CSP_RGB;
|
|
|
|
image->levels = vc->colorspace.levels_out; // hardcoded with conv. matrix
|
2011-10-06 20:46:01 +02:00
|
|
|
|
|
|
|
void *dst_planes[] = { image->planes[0] };
|
|
|
|
uint32_t dst_pitches[] = { image->stride[0] };
|
|
|
|
vdp_st = vdp->output_surface_get_bits_native(surface, NULL, dst_planes,
|
|
|
|
dst_pitches);
|
|
|
|
CHECK_ST_WARNING("Error when calling vdp_output_surface_get_bits_native");
|
|
|
|
|
|
|
|
return image;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Grab the current video frame at source resolution as an RGB image.
 * Renders the video into a lazily created (and then cached)
 * screenshot surface, reads the pixels back, and attaches the
 * pre-scaling display size for correct aspect. Caller owns the image.
 */
static struct mp_image *get_screenshot(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;
    VdpStatus vdp_st;
    struct vdp_functions *vdp = vc->vdp;

    // Create the reusable screenshot surface on first use.
    if (vc->screenshot_surface == VDP_INVALID_HANDLE) {
        vdp_st = vdp->output_surface_create(vc->vdp_device,
                                            OUTPUT_RGBA_FORMAT,
                                            vc->vid_width, vc->vid_height,
                                            &vc->screenshot_surface);
        CHECK_ST_WARNING("Error when calling vdp_output_surface_create");
    }

    // Render the whole source rectangle, without window scaling/OSD.
    VdpRect rc = { .x1 = vc->vid_width, .y1 = vc->vid_height };
    render_video_to_output_surface(vo, vc->screenshot_surface, &rc);

    struct mp_image *image = read_output_surface(vc, vc->screenshot_surface,
                                                 vc->vid_width, vc->vid_height);

    mp_image_set_display_size(image, vo->aspdat.prew, vo->aspdat.preh);

    return image;
}
|
|
|
|
|
|
|
|
static struct mp_image *get_window_screenshot(struct vo *vo)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
|
|
|
int last_surface = WRAP_ADD(vc->surface_num, -1, vc->num_output_surfaces);
|
|
|
|
VdpOutputSurface screen = vc->output_surfaces[last_surface];
|
2012-10-21 00:48:01 +02:00
|
|
|
struct mp_image *image = read_output_surface(vo->priv, screen,
|
|
|
|
vc->output_surface_width,
|
|
|
|
vc->output_surface_height);
|
2012-11-10 02:02:24 +01:00
|
|
|
mp_image_set_size(image, vo->dwidth, vo->dheight);
|
2011-10-06 20:46:01 +02:00
|
|
|
return image;
|
|
|
|
}
|
|
|
|
|
video/filter: change filter API, use refcounting, remove filter DR
Change the entire filter API to use reference counted images instead
of vf_get_image().
Remove filter "direct rendering". This was useful for vf_expand and (in
rare cases) vf_sub: DR allowed these filters to pass a cropped image to
the filters before them. Then, on filtering, the image was "uncropped",
so that black bars could be added around the image without copying. This
means that in some cases, vf_expand will be slower (-vf gradfun,expand
for example).
Note that another form of DR used for in-place filters has been replaced
by simpler logic. Instead of trying to do DR, filters can check if the
image is writeable (with mp_image_is_writeable()), and do true in-place
if that's the case. This affects filters like vf_gradfun and vf_sub.
Everything has to support strides now. If something doesn't, making a
copy of the image data is required.
2012-11-05 14:25:04 +01:00
|
|
|
// Custom-ref destructor for decoder images: clears the per-surface
// in-use flag that ptr points to, returning the surface to the pool.
static void release_decoder_surface(void *ptr)
{
    *(bool *)ptr = false;
}
|
|
|
|
|
|
|
|
/*
 * Hand out an mp_image backed by a free hardware video surface for the
 * decoder to render into. planes[0] of the returned image points at
 * the vdpau_render_state; the image's custom-ref destructor
 * (release_decoder_surface) clears the in-use flag when the last
 * reference goes away.
 *
 * Returns NULL for non-VDPAU formats or when every surface is busy.
 */
static struct mp_image *get_decoder_surface(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;

    if (!IMGFMT_IS_VDPAU(vc->image_format))
        return NULL;

    // Linear scan for the first surface not currently referenced.
    for (int n = 0; n < MAX_VIDEO_SURFACES; n++) {
        if (!vc->surface_in_use[n]) {
            vc->surface_in_use[n] = true;
            struct mp_image *res =
                mp_image_new_custom_ref(&(struct mp_image){0},
                                        &vc->surface_in_use[n],
                                        release_decoder_surface);
            mp_image_setfmt(res, vc->image_format);
            mp_image_set_size(res, vc->vid_width, vc->vid_height);
            // get_surface() lazily creates the actual VDPAU surface.
            struct vdpau_render_state *rndr = get_surface(vo, n);
            res->planes[0] = (void *)rndr;
            return res;
        }
    }

    mp_msg(MSGT_VO, MSGL_ERR, "[vdpau] no surfaces available in "
           "get_decoder_surface\n");
    // TODO: this probably breaks things forever, provide a dummy buffer?
    return NULL;
}
|
|
|
|
|
2012-11-04 16:24:18 +01:00
|
|
|
static int query_format(struct vo *vo, uint32_t format)
|
2009-02-16 21:58:13 +01:00
|
|
|
{
|
2009-05-09 17:08:30 +02:00
|
|
|
int default_flags = VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW
|
2012-11-15 15:03:40 +01:00
|
|
|
| VFCAP_OSD | VFCAP_FLIP;
|
2009-02-16 21:58:13 +01:00
|
|
|
switch (format) {
|
video: decouple internal pixel formats from FourCCs
mplayer's video chain traditionally used FourCCs for pixel formats. For
example, it used IMGFMT_YV12 for 4:2:0 YUV, which was defined to the
string 'YV12' interpreted as unsigned int. Additionally, it used to
encode information into the numeric values of some formats. The RGB
formats had their bit depth and endian encoded into the least
significant byte. Extended planar formats (420P10 etc.) had chroma
shift, endian, and component bit depth encoded. (This has been removed
in recent commits.)
Replace the FourCC mess with a simple enum. Remove all the redundant
formats like YV12/I420/IYUV. Replace some image format names by
something more intuitive, most importantly IMGFMT_YV12 -> IMGFMT_420P.
Add img_fourcc.h, which contains the old IDs for code that actually uses
FourCCs. Change the way demuxers, that output raw video, identify the
video format: they set either MP_FOURCC_RAWVIDEO or MP_FOURCC_IMGFMT to
request the rawvideo decoder, and sh_video->imgfmt specifies the pixel
format. Like the previous hack, this is supposed to avoid the need for
a complete codecs.cfg entry per format, or other lookup tables. (Note
that the RGB raw video FourCCs mostly rely on ffmpeg's mappings for NUT
raw video, but this is still considered better than adding a raw video
decoder - even if trivial, it would be full of annoying lookup tables.)
The TV code has not been tested.
Some corrective changes regarding endian and other image format flags
creep in.
2012-12-23 20:03:30 +01:00
|
|
|
case IMGFMT_420P:
|
2010-10-29 17:43:31 +02:00
|
|
|
case IMGFMT_NV12:
|
video: decouple internal pixel formats from FourCCs
mplayer's video chain traditionally used FourCCs for pixel formats. For
example, it used IMGFMT_YV12 for 4:2:0 YUV, which was defined to the
string 'YV12' interpreted as unsigned int. Additionally, it used to
encode information into the numeric values of some formats. The RGB
formats had their bit depth and endian encoded into the least
significant byte. Extended planar formats (420P10 etc.) had chroma
shift, endian, and component bit depth encoded. (This has been removed
in recent commits.)
Replace the FourCC mess with a simple enum. Remove all the redundant
formats like YV12/I420/IYUV. Replace some image format names by
something more intuitive, most importantly IMGFMT_YV12 -> IMGFMT_420P.
Add img_fourcc.h, which contains the old IDs for code that actually uses
FourCCs. Change the way demuxers, that output raw video, identify the
video format: they set either MP_FOURCC_RAWVIDEO or MP_FOURCC_IMGFMT to
request the rawvideo decoder, and sh_video->imgfmt specifies the pixel
format. Like the previous hack, this is supposed to avoid the need for
a complete codecs.cfg entry per format, or other lookup tables. (Note
that the RGB raw video FourCCs mostly rely on ffmpeg's mappings for NUT
raw video, but this is still considered better than adding a raw video
decoder - even if trivial, it would be full of annoying lookup tables.)
The TV code has not been tested.
Some corrective changes regarding endian and other image format flags
creep in.
2012-12-23 20:03:30 +01:00
|
|
|
case IMGFMT_YUYV:
|
2010-10-29 17:43:31 +02:00
|
|
|
case IMGFMT_UYVY:
|
|
|
|
case IMGFMT_VDPAU_MPEG1:
|
|
|
|
case IMGFMT_VDPAU_MPEG2:
|
|
|
|
case IMGFMT_VDPAU_H264:
|
|
|
|
case IMGFMT_VDPAU_WMV3:
|
|
|
|
case IMGFMT_VDPAU_VC1:
|
|
|
|
case IMGFMT_VDPAU_MPEG4:
|
|
|
|
return default_flags;
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
/*
 * Tear down every VDPAU object owned by the VO: video-specific state
 * via free_video_specific(), the presentation queue and its target,
 * all output surfaces, the OSD bitmap surfaces, and finally the VDPAU
 * device itself. Handles partially-initialized state by skipping
 * anything still marked VDP_INVALID_HANDLE.
 */
static void destroy_vdpau_objects(struct vo *vo)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;

    int i;
    VdpStatus vdp_st;

    free_video_specific(vo);

    if (vc->flip_queue != VDP_INVALID_HANDLE) {
        vdp_st = vdp->presentation_queue_destroy(vc->flip_queue);
        CHECK_ST_WARNING("Error when calling vdp_presentation_queue_destroy");
    }

    if (vc->flip_target != VDP_INVALID_HANDLE) {
        vdp_st = vdp->presentation_queue_target_destroy(vc->flip_target);
        CHECK_ST_WARNING("Error when calling "
                         "vdp_presentation_queue_target_destroy");
    }

    for (i = 0; i < vc->num_output_surfaces; i++) {
        if (vc->output_surfaces[i] == VDP_INVALID_HANDLE)
            continue;
        vdp_st = vdp->output_surface_destroy(vc->output_surfaces[i]);
        CHECK_ST_WARNING("Error when calling vdp_output_surface_destroy");
    }

    for (int i = 0; i < MAX_OSD_PARTS; i++) {
        struct osd_bitmap_surface *sfc = &vc->osd_surfaces[i];
        if (sfc->surface != VDP_INVALID_HANDLE) {
            vdp_st = vdp->bitmap_surface_destroy(sfc->surface);
            CHECK_ST_WARNING("Error when calling vdp_bitmap_surface_destroy");
        }
    }

    // Destroy the device last; all objects above belong to it.
    vdp_st = vdp->device_destroy(vc->vdp_device);
    CHECK_ST_WARNING("Error when calling vdp_device_destroy");
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
static void uninit(struct vo *vo)
|
2009-02-16 21:58:13 +01:00
|
|
|
{
|
2009-05-06 22:42:24 +02:00
|
|
|
struct vdpctx *vc = vo->priv;
|
2009-02-16 21:58:13 +01:00
|
|
|
|
|
|
|
/* Destroy all vdpau objects */
|
2009-05-06 20:04:37 +02:00
|
|
|
destroy_vdpau_objects(vo);
|
2009-02-23 10:21:57 +01:00
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
vo_x11_uninit(vo);
|
2009-02-16 21:58:13 +01:00
|
|
|
|
2009-11-15 14:41:25 +01:00
|
|
|
// Free bitstream buffers allocated by FFmpeg
|
|
|
|
for (int i = 0; i < MAX_VIDEO_SURFACES; i++)
|
|
|
|
av_freep(&vc->surface_render[i].bitstream_buffers);
|
2009-02-16 21:58:13 +01:00
|
|
|
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
/*
 * VO preinit: prepare state so uninit() is safe at any point, set
 * colorspace/equalizer defaults and the deinterlacing mode from the
 * options, open the X11 connection, and fetch the VDPAU function
 * pointers. Returns 0 on success, -1 on failure (with everything
 * acquired so far released again).
 */
static int preinit(struct vo *vo, const char *arg)
{
    struct vdpctx *vc = vo->priv;

    // Mark everything as invalid first so uninit() can tell what has been
    // allocated
    mark_vdpau_objects_uninitialized(vo);

    vc->colorspace = (struct mp_csp_details) MP_CSP_DETAILS_DEFAULTS;
    vc->video_eq.capabilities = MP_CSP_EQ_CAPS_COLORMATRIX;

    // deint == 0 selects the default algorithm (3); a negative value
    // picks the algorithm but leaves deinterlacing switched off.
    vc->deint_type = vc->deint ? FFABS(vc->deint) : 3;
    if (vc->deint < 0)
        vc->deint = 0;

    if (!vo_x11_init(vo))
        return -1;

    // After this calling uninit() should work to free resources

    if (win_x11_init_vdpau_procs(vo) < 0) {
        // Partial failure: drop the device if we got one, then undo X11.
        if (vc->vdp && vc->vdp->device_destroy)
            vc->vdp->device_destroy(vc->vdp_device);
        vo_x11_uninit(vo);
        return -1;
    }

    return 0;
}
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
static int get_equalizer(struct vo *vo, const char *name, int *value)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 23:50:21 +02:00
|
|
|
return mp_csp_equalizer_get(&vc->video_eq, name, value) >= 0 ?
|
|
|
|
VO_TRUE : VO_NOTIMPL;
|
2009-02-28 14:20:01 +01:00
|
|
|
}
|
|
|
|
|
2011-08-11 20:13:34 +02:00
|
|
|
static bool status_ok(struct vo *vo)
|
|
|
|
{
|
|
|
|
if (!vo->config_ok || handle_preemption(vo) < 0)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-05-06 20:04:37 +02:00
|
|
|
static int set_equalizer(struct vo *vo, const char *name, int value)
|
|
|
|
{
|
|
|
|
struct vdpctx *vc = vo->priv;
|
2009-02-28 14:20:01 +01:00
|
|
|
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 23:50:21 +02:00
|
|
|
if (mp_csp_equalizer_set(&vc->video_eq, name, value) < 0)
|
2009-02-28 14:20:01 +01:00
|
|
|
return VO_NOTIMPL;
|
|
|
|
|
2011-08-11 20:13:34 +02:00
|
|
|
if (status_ok(vo))
|
|
|
|
update_csc_matrix(vo);
|
2009-11-15 14:21:40 +01:00
|
|
|
return true;
|
2009-02-28 14:20:01 +01:00
|
|
|
}
|
|
|
|
|
2011-08-11 20:13:34 +02:00
|
|
|
// Perform a resize only when the VO state currently allows rendering.
static void checked_resize(struct vo *vo)
{
    if (status_ok(vo))
        resize(vo);
}
|
|
|
|
|
2009-05-04 02:09:50 +02:00
|
|
|
// Central VO control dispatcher.  Handles runtime VOCTRL_* requests sent to
// the vdpau VO: deinterlacing toggles, equalizer and colorspace settings,
// window-state changes, frame-queue advancement, redraw and screenshots.
// The type *data points to depends on the request.  Returns VO_TRUE/true on
// success and VO_NOTIMPL for requests this VO does not implement.
static int control(struct vo *vo, uint32_t request, void *data)
{
    struct vdpctx *vc = vo->priv;
    struct vdp_functions *vdp = vc->vdp;

    // Check for (and recover from) display preemption before touching any
    // VDPAU objects; they may have been invalidated since the last call.
    handle_preemption(vo);

    switch (request) {
    case VOCTRL_GET_DEINTERLACE:
        *(int *)data = vc->deint;
        return VO_TRUE;
    case VOCTRL_SET_DEINTERLACE:
        // Enabling deinterlacing re-applies the configured type; *data acts
        // as an on/off switch rather than carrying the type itself.
        vc->deint = *(int *)data;
        if (vc->deint)
            vc->deint = vc->deint_type;
        // Types above 2 are implemented by the VDPAU video mixer (temporal
        // or temporal-spatial), so the mixer feature must be toggled too —
        // but only if the VDPAU state is currently valid.
        if (vc->deint_type > 2 && status_ok(vo)) {
            VdpStatus vdp_st;
            VdpVideoMixerFeature features[1] =
                {vc->deint_type == 3 ?
                 VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL :
                 VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL};
            VdpBool feature_enables[1] = {vc->deint ? VDP_TRUE : VDP_FALSE};
            vdp_st = vdp->video_mixer_set_feature_enables(vc->video_mixer,
                                                          1, features,
                                                          feature_enables);
            // Failure here is non-fatal; just warn and keep going.
            CHECK_ST_WARNING("Error changing deinterlacing settings");
        }
        vo->want_redraw = true;
        return VO_TRUE;
    case VOCTRL_PAUSE:
        // If the last frame was dropped, request a redraw so the display
        // does not stay on a stale frame while paused.
        if (vc->dropped_frame)
            vo->want_redraw = true;
        return true;
    case VOCTRL_HWDEC_ALLOC_SURFACE:
        // Hand out a decoder surface for hardware-accelerated decoding.
        *(struct mp_image **)data = get_decoder_surface(vo);
        return true;
    case VOCTRL_HWDEC_DECODER_RENDER:
        return decoder_render(vo, data);
    case VOCTRL_BORDER:
        vo_x11_border(vo);
        checked_resize(vo);
        return VO_TRUE;
    case VOCTRL_FULLSCREEN:
        vo_x11_fullscreen(vo);
        checked_resize(vo);
        return VO_TRUE;
    case VOCTRL_GET_PANSCAN:
        // Panscan is supported; nothing to report beyond that.
        return VO_TRUE;
    case VOCTRL_SET_PANSCAN:
        checked_resize(vo);
        return VO_TRUE;
    case VOCTRL_SET_EQUALIZER: {
        vo->want_redraw = true;
        struct voctrl_set_equalizer_args *args = data;
        return set_equalizer(vo, args->name, args->value);
    }
    case VOCTRL_GET_EQUALIZER: {
        struct voctrl_get_equalizer_args *args = data;
        return get_equalizer(vo, args->name, args->valueptr);
    }
    case VOCTRL_SET_YUV_COLORSPACE:
        // Record the new colorspace; only rebuild the CSC matrix if the
        // VDPAU state is usable (it is rebuilt on reinit otherwise).
        vc->colorspace = *(struct mp_csp_details *)data;
        if (status_ok(vo))
            update_csc_matrix(vo);
        vo->want_redraw = true;
        return true;
    case VOCTRL_GET_YUV_COLORSPACE:
        *(struct mp_csp_details *)data = vc->colorspace;
        return true;
    case VOCTRL_ONTOP:
        vo_x11_ontop(vo);
        return VO_TRUE;
    case VOCTRL_UPDATE_SCREENINFO:
        vo_x11_update_screeninfo(vo);
        return VO_TRUE;
    case VOCTRL_NEWFRAME:
        // Advance the deinterlace queue and display the next frame/field.
        vc->deint_queue_pos = next_deint_queue_pos(vo, true);
        if (status_ok(vo))
            video_to_output_surface(vo);
        return true;
    case VOCTRL_SKIPFRAME:
        // Advance the queue without rendering.
        vc->deint_queue_pos = next_deint_queue_pos(vo, true);
        return true;
    case VOCTRL_REDRAW_FRAME:
        if (status_ok(vo))
            video_to_output_surface(vo);
        return true;
    case VOCTRL_RESET:
        // Drop all queued frames (e.g. on seek).
        forget_frames(vo);
        return true;
    case VOCTRL_SCREENSHOT: {
        if (!status_ok(vo))
            return false;
        struct voctrl_screenshot_args *args = data;
        if (args->full_window)
            args->out_image = get_window_screenshot(vo);
        else
            args->out_image = get_screenshot(vo);
        return true;
    }
    }
    return VO_NOTIMPL;
}
|
|
|
|
|
2012-06-25 22:12:03 +02:00
|
|
|
// The option entries below store their parsed values directly into
// struct vdpctx fields.
#undef OPT_BASE_STRUCT
#define OPT_BASE_STRUCT struct vdpctx

// VO driver entry point for -vo vdpau: metadata, callback table and
// user-visible suboptions.
const struct vo_driver video_out_vdpau = {
    // This VO queues frames itself (see get_buffered_frame below).
    .buffer_frames = true,
    .info = &(const struct vo_info_s){
        "VDPAU with X11",
        "vdpau",
        "Rajib Mahapatra <rmahapatra@nvidia.com> and others",
        ""
    },
    .preinit = preinit,
    .query_format = query_format,
    .config = config,
    .control = control,
    .draw_image = draw_image,
    .get_buffered_frame = set_next_frame_info,
    .draw_osd = draw_osd,
    .flip_page_timed = flip_page_timed,
    .check_events = check_events,
    .uninit = uninit,
    .priv_size = sizeof(struct vdpctx),
    .options = (const struct m_option []){
        // Deinterlacing mode; negative values presumably select a mode
        // without enabling it — TODO confirm against deint handling.
        OPT_INTRANGE("deint", deint, 0, -4, 4),
        OPT_FLAG("chroma-deint", chroma_deint, 0, OPTDEF_INT(1)),
        OPT_FLAG("pullup", pullup, 0),
        OPT_FLOATRANGE("denoise", denoise, 0, 0, 1),
        OPT_FLOATRANGE("sharpen", sharpen, 0, -1, 1),
        OPT_INTRANGE("hqscaling", hqscaling, 0, 0, 9),
        // Override the assumed video FPS used for frame timing.
        OPT_FLOAT("fps", user_fps, 0),
        OPT_FLAG("composite-detect", composite_detect, 0, OPTDEF_INT(1)),
        // Presentation-queue lead times (ms) for windowed and fullscreen.
        OPT_INT("queuetime_windowed", flip_offset_window, 0, OPTDEF_INT(50)),
        OPT_INT("queuetime_fs", flip_offset_fs, 0, OPTDEF_INT(50)),
        OPT_INTRANGE("output_surfaces", num_output_surfaces, 0,
                     2, MAX_OUTPUT_SURFACES, OPTDEF_INT(3)),
        {NULL},
    }
};
|