demux: refactor to export seek ranges

Even though only 1 seek range is supported for the time being.

Apart from preparing for possible future features, the main gain is
that we finally separate the reporting of buffering from the reporting
of seek ranges. These can be subtly different, so a clear separation is
good to have.

This commit also fixes ts_reader not being rebased to the start time,
which could make the player show "???" for the buffered cache amount
with some .ts files and others (especially at the end, when ts_reader
could become higher than ts_max). It also fixes how the cache-end
field in the demuxer-cache-state property is written: it checked
ts_start against NOPTS, which makes no sense.

ts_start was never used (except for the bug mentioned above), so get rid
of it completely. This also makes it convenient to move the segment
check for last_ts to the demux_add_packet() function.
Author: wm4
Date:   2017-10-30 14:26:54 +01:00
Commit: 2d958dbf2b
Parent: d6ebb2df47
3 changed files with 49 additions and 43 deletions
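
For illustration only (not part of this commit): a minimal sketch of how a
consumer of DEMUXER_CTRL_GET_READER_STATE could report buffering and
seekability separately after this change. It assumes the new
struct demux_ctrl_reader_state from demux/demux.h below; the
print_reader_state() helper itself is hypothetical.

    #include <stdio.h>

    #include "demux/demux.h"   // struct demux_ctrl_reader_state, demux_seek_range;
                               // MP_NOPTS_VALUE comes from mpv's common headers

    // Hypothetical helper: with this change, the buffering report (ts_end,
    // ts_duration) and the seekable ranges are separate pieces of information.
    static void print_reader_state(struct demux_ctrl_reader_state *st)
    {
        if (st->ts_end != MP_NOPTS_VALUE)
            printf("buffered until %f (readahead: %f)\n",
                   st->ts_end, st->ts_duration);
        // Only 1 range can be reported for now (MAX_SEEK_RANGES == 1), but the
        // loop is already generic.
        for (int n = 0; n < st->num_seek_ranges; n++) {
            struct demux_seek_range *range = &st->seek_ranges[n];
            printf("seekable range %d: %f <-> %f\n", n, range->start, range->end);
        }
    }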

demux/demux.c

@@ -659,6 +659,8 @@ void demux_add_packet(struct sh_stream *stream, demux_packet_t *dp)
         dp->pts = dp->dts;
     double ts = dp->dts == MP_NOPTS_VALUE ? dp->pts : dp->dts;
+    if (dp->segmented)
+        ts = MP_PTS_MIN(ts, dp->end);
     if (ts != MP_NOPTS_VALUE && (ts > ds->last_ts || ts + 10 < ds->last_ts))
         ds->last_ts = ts;
     if (ds->base_ts == MP_NOPTS_VALUE)
@@ -1681,15 +1683,16 @@ static bool try_seek_cache(struct demux_internal *in, double pts, int flags)
     if (cached_demux_control(in, DEMUXER_CTRL_GET_READER_STATE, &rstate) < 0)
         return false;
-    double start = MP_ADD_PTS(rstate.ts_min, -in->ts_offset);
-    double end = MP_ADD_PTS(rstate.ts_max, -in->ts_offset);
+    struct demux_seek_range r = {MP_NOPTS_VALUE, MP_NOPTS_VALUE};
+    if (rstate.num_seek_ranges > 0)
+        r = rstate.seek_ranges[0];
-    MP_VERBOSE(in, "in-cache seek range = %f <-> %f (%f)\n", start, end, pts);
+    r.start = MP_ADD_PTS(r.start, -in->ts_offset);
+    r.end = MP_ADD_PTS(r.end, -in->ts_offset);
-    if (start == MP_NOPTS_VALUE || end == MP_NOPTS_VALUE)
-        return false;
+    MP_VERBOSE(in, "in-cache seek range = %f <-> %f (%f)\n", r.start, r.end, pts);
-    if (pts < start || pts > end)
+    if (pts < r.start || pts > r.end)
         return false;
     clear_reader_state(in);
@@ -1971,15 +1974,13 @@ static int cached_demux_control(struct demux_internal *in, int cmd, void *arg)
         struct demux_ctrl_reader_state *r = arg;
         *r = (struct demux_ctrl_reader_state){
             .eof = in->last_eof,
-            .seekable = in->seekable_cache,
-            .ts_start = MP_NOPTS_VALUE,
-            .ts_min = MP_NOPTS_VALUE,
-            .ts_max = MP_NOPTS_VALUE,
             .ts_reader = MP_NOPTS_VALUE,
             .ts_duration = -1,
         };
         bool any_packets = false;
-        bool seek_ok = true;
+        bool seek_ok = in->seekable_cache && !in->seeking;
+        double ts_min = MP_NOPTS_VALUE;
+        double ts_max = MP_NOPTS_VALUE;
         for (int n = 0; n < in->num_streams; n++) {
             struct demux_stream *ds = in->streams[n]->ds;
             if (ds->active && !(!ds->queue_head && ds->eof) && !ds->ignore_eof)
@@ -1988,36 +1989,35 @@ static int cached_demux_control(struct demux_internal *in, int cmd, void *arg)
                 r->ts_reader = MP_PTS_MAX(r->ts_reader, ds->base_ts);
                 // (yes, this is asymmetric, and uses MAX in both cases - it's ok
                 // if it's a bit off for ts_max, as the demuxer can just wait for
-                // new packets if we seek there and also last_ts is the hightest
+                // new packets if we seek there and also last_ts is the highest
                 // DTS or PTS, while ts_min should be as accurate as possible, as
                 // we would have to trigger a real seek if it's off and we seeked
                 // there)
-                r->ts_min = MP_PTS_MAX(r->ts_min, ds->back_pts);
-                r->ts_max = MP_PTS_MAX(r->ts_max, ds->last_ts);
+                ts_min = MP_PTS_MAX(ts_min, ds->back_pts);
+                ts_max = MP_PTS_MAX(ts_max, ds->last_ts);
                 if (ds->back_pts == MP_NOPTS_VALUE ||
                     ds->last_ts == MP_NOPTS_VALUE)
                     seek_ok = false;
-                if (ds->queue_head) {
-                    any_packets = true;
-                    double ts = PTS_OR_DEF(ds->queue_head->dts,
-                                           ds->queue_head->pts);
-                    r->ts_start = MP_PTS_MIN(r->ts_start, ts);
-                    if (ds->queue_tail->segmented)
-                        r->ts_max = MP_PTS_MIN(r->ts_max, ds->queue_tail->end);
-                }
+                any_packets |= !!ds->queue_head;
             }
         }
         r->idle = (in->idle && !r->underrun) || r->eof;
         r->underrun &= !r->idle;
-        r->ts_start = MP_ADD_PTS(r->ts_start, in->ts_offset);
-        r->ts_min = MP_ADD_PTS(r->ts_min, in->ts_offset);
-        r->ts_max = MP_ADD_PTS(r->ts_max, in->ts_offset);
-        if (r->ts_reader != MP_NOPTS_VALUE && r->ts_reader <= r->ts_max)
-            r->ts_duration = r->ts_max - r->ts_reader;
+        ts_min = MP_ADD_PTS(ts_min, in->ts_offset);
+        ts_max = MP_ADD_PTS(ts_max, in->ts_offset);
+        r->ts_reader = MP_ADD_PTS(r->ts_reader, in->ts_offset);
+        if (r->ts_reader != MP_NOPTS_VALUE && r->ts_reader <= ts_max)
+            r->ts_duration = ts_max - r->ts_reader;
         if (in->seeking || !any_packets)
             r->ts_duration = 0;
-        if (in->seeking || !seek_ok)
-            r->ts_max = r->ts_min = MP_NOPTS_VALUE;
+        if (seek_ok && ts_min != MP_NOPTS_VALUE && ts_max > ts_min) {
+            r->num_seek_ranges = 1;
+            r->seek_ranges[0] = (struct demux_seek_range){
+                .start = ts_min,
+                .end = ts_max,
+            };
+        }
+        r->ts_end = ts_max;
         return CONTROL_OK;
     }
     }

demux/demux.h

@@ -40,16 +40,21 @@ enum demux_ctrl {
     DEMUXER_CTRL_REPLACE_STREAM,
 };
+#define MAX_SEEK_RANGES 1
+struct demux_seek_range {
+    double start, end;
+};
 struct demux_ctrl_reader_state {
-    bool eof, underrun, idle, seekable;
+    bool eof, underrun, idle;
     double ts_duration;
     double ts_reader; // approx. timestamp of decoder position
-    double ts_start;  // approx. timestamp for the earliest packet buffered
-    double ts_min;    // timestamp of the earliest packet in backward cache
-                      // that can be seeked to (i.e. all streams have such
-                      // a packet for which normal seeks can be executed)
-    double ts_max;    // timestamp of latest packet in forward cache that can be
-                      // seeked to
+    double ts_end; // approx. timestamp of end of buffered range
+    // Positions that can be seeked to without incurring the latency of a low
+    // level seek.
+    int num_seek_ranges;
+    struct demux_seek_range seek_ranges[MAX_SEEK_RANGES];
 };
 struct demux_ctrl_stream_ctrl {

player/command.c

@@ -1696,10 +1696,10 @@ static int mp_property_demuxer_cache_time(void *ctx, struct m_property *prop,
     if (demux_control(mpctx->demuxer, DEMUXER_CTRL_GET_READER_STATE, &s) < 1)
         return M_PROPERTY_UNAVAILABLE;
-    if (s.ts_max == MP_NOPTS_VALUE)
+    if (s.ts_end == MP_NOPTS_VALUE)
         return M_PROPERTY_UNAVAILABLE;
-    return m_property_double_ro(action, arg, s.ts_max);
+    return m_property_double_ro(action, arg, s.ts_end);
 }
 static int mp_property_demuxer_cache_idle(void *ctx, struct m_property *prop,
@@ -1739,14 +1739,15 @@ static int mp_property_demuxer_cache_state(void *ctx, struct m_property *prop,
     struct mpv_node *ranges =
         node_map_add(r, "seekable-ranges", MPV_FORMAT_NODE_ARRAY);
-    if (s.ts_min != MP_NOPTS_VALUE && s.ts_max != MP_NOPTS_VALUE && s.seekable) {
+    for (int n = 0; n < s.num_seek_ranges; n++) {
+        struct demux_seek_range *range = &s.seek_ranges[n];
         struct mpv_node *sub = node_array_add(ranges, MPV_FORMAT_NODE_MAP);
-        node_map_add_double(sub, "start", s.ts_min);
-        node_map_add_double(sub, "end", s.ts_max);
+        node_map_add_double(sub, "start", range->start);
+        node_map_add_double(sub, "end", range->end);
     }
-    if (s.ts_start != MP_NOPTS_VALUE)
-        node_map_add_double(r, "cache-end", s.ts_max);
+    if (s.ts_end != MP_NOPTS_VALUE)
+        node_map_add_double(r, "cache-end", s.ts_end);
     if (s.ts_reader != MP_NOPTS_VALUE)
         node_map_add_double(r, "reader-pts", s.ts_reader);
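
Not part of the diff, but for context: the property change above is visible to
clients that read demuxer-cache-state as an MPV_FORMAT_NODE map. A minimal
sketch using libmpv; the dump_seek_ranges() helper and its minimal error
handling are illustrative and not taken from mpv's code.

    #include <stdio.h>
    #include <string.h>
    #include <mpv/client.h>

    // Print every entry of the "seekable-ranges" array that
    // mp_property_demuxer_cache_state fills in above. 'ctx' is assumed to be
    // an initialized mpv_handle with a file loaded.
    static void dump_seek_ranges(mpv_handle *ctx)
    {
        mpv_node node;
        if (mpv_get_property(ctx, "demuxer-cache-state", MPV_FORMAT_NODE, &node) < 0)
            return;
        mpv_node_list *map = node.u.list; // the property is a NODE_MAP
        for (int i = 0; node.format == MPV_FORMAT_NODE_MAP && i < map->num; i++) {
            if (strcmp(map->keys[i], "seekable-ranges") != 0 ||
                map->values[i].format != MPV_FORMAT_NODE_ARRAY)
                continue;
            mpv_node_list *ranges = map->values[i].u.list;
            for (int n = 0; n < ranges->num; n++) {
                mpv_node_list *range = ranges->values[n].u.list; // "start"/"end" map
                double start = -1, end = -1;
                for (int k = 0; k < range->num; k++) {
                    if (strcmp(range->keys[k], "start") == 0)
                        start = range->values[k].u.double_;
                    if (strcmp(range->keys[k], "end") == 0)
                        end = range->values[k].u.double_;
                }
                printf("seekable range %d: %f <-> %f\n", n, start, end);
            }
        }
        mpv_free_node_contents(&node);
    }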