1
mirror of https://github.com/mpv-player/mpv synced 2024-08-24 07:21:49 +02:00

Remove support for LIVE555 RTSP streaming

The main excuse for removing this is that LIVE555 deprecated the API
the mplayer implementation was using. The old API still seems to be
somewhat supported, but must be explicitly enabled at LIVE555
compilation, so mplayer won't always work on any user installation.

The implementation was also very messy, in C++, and FFmpeg support is
available as alternative.

Remove it completely.
This commit is contained in:
wm4 2012-08-18 10:37:34 +02:00
parent 8ca3ec1562
commit 3a5d5f01d4
14 changed files with 2 additions and 1476 deletions

View File

@ -65,9 +65,6 @@ SRCS_COMMON-$(LIBNEMESI) += libmpdemux/demux_nemesi.c \
SRCS_COMMON-$(LIBPOSTPROC) += libmpcodecs/vf_pp.c
SRCS_COMMON-$(LIBSMBCLIENT) += stream/stream_smb.c
SRCS_COMMON-$(LIVE555) += libmpdemux/demux_rtp.cpp \
libmpdemux/demux_rtp_codec.cpp \
stream/stream_live555.c
SRCS_COMMON-$(MACOSX_FINDER) += osdep/macosx_finder_args.m
SRCS_COMMON-$(COCOA) += libvo/osx_common.c \
libvo/cocoa_common.m \

View File

@ -382,13 +382,10 @@ const m_option_t common_opts[] = {
#endif /* HAVE_AF_INET6 */
#endif /* CONFIG_NETWORKING */
#ifdef CONFIG_LIVE555
{"rtsp-stream-over-http", &rtsp_transport_http, CONF_TYPE_FLAG, 0, 0, 1, NULL},
#endif /* CONFIG_LIVE555 */
#if defined(CONFIG_LIBNEMESI) || defined(CONFIG_LIVE555)
#if defined(CONFIG_LIBNEMESI)
// -rtsp-stream-over-tcp option, specifying TCP streaming of RTP/RTCP
{"rtsp-stream-over-tcp", &rtsp_transport_tcp, CONF_TYPE_FLAG, 0, 0, 1, NULL},
#endif /* defined(CONFIG_LIBNEMESI) || defined(CONFIG_LIVE555) */
#endif /* defined(CONFIG_LIBNEMESI) */
#ifdef CONFIG_LIBNEMESI
{"rtsp-stream-over-sctp", &rtsp_transport_sctp, CONF_TYPE_FLAG, 0, 0, 1, NULL},
#endif /* CONFIG_LIBNEMESI */

57
configure vendored
View File

@ -319,7 +319,6 @@ Optional features:
--disable-networking disable networking [enable]
--enable-winsock2_h enable winsock2_h [autodetect]
--enable-smb enable Samba (SMB) input [autodetect]
--enable-live enable LIVE555 Streaming Media [disable]
--enable-libquvi enable libquvi [autodetect]
--enable-nemesi enable Nemesi Streaming Media [autodetect]
--enable-lcms2 enable LCMS2 support [autodetect]
@ -474,7 +473,6 @@ _libbs2b=auto
_vcd=auto
_bluray=auto
_dvdread=auto
_live=no
_nemesi=auto
_lcms2=auto
_xinerama=auto
@ -694,8 +692,6 @@ for ac_option do
--disable-bluray) _bluray=no ;;
--enable-dvdread) _dvdread=yes ;;
--disable-dvdread) _dvdread=no ;;
--enable-live) _live=yes ;;
--disable-live) _live=no ;;
--enable-nemesi) _nemesi=yes ;;
--disable-nemesi) _nemesi=no ;;
--enable-lcms2) _lcms2=yes ;;
@ -3034,57 +3030,6 @@ else
fi
echores "$_nemesi"
echocheck "LIVE555 Streaming Media libraries"
if test "$_live" != no && test "$networking" = yes ; then
cat > $TMPCPP << EOF
#include <liveMedia.hh>
#if (LIVEMEDIA_LIBRARY_VERSION_INT < 1141257600)
#error Please upgrade to version 2006.03.03 or later of the "LIVE555 Streaming Media" libraries - available from <www.live555.com/liveMedia/>
#endif
int main(void) { return 0; }
EOF
_live=no
for I in $extra_cflags "-I$_libdir/live" "-I/usr/lib/live" "-I/usr/lib64/live" "-I/usr/local/live" "-I/usr/local/lib/live" ; do
cxx_check $I/liveMedia/include $I/UsageEnvironment/include $I/groupsock/include &&
_livelibdir=$(echo $I| sed s/-I//) &&
extra_ldflags="$_livelibdir/liveMedia/libliveMedia.a \
$_livelibdir/groupsock/libgroupsock.a \
$_livelibdir/UsageEnvironment/libUsageEnvironment.a \
$_livelibdir/BasicUsageEnvironment/libBasicUsageEnvironment.a \
$extra_ldflags -lstdc++" \
extra_cxxflags="-I$_livelibdir/liveMedia/include \
-I$_livelibdir/UsageEnvironment/include \
-I$_livelibdir/BasicUsageEnvironment/include \
-I$_livelibdir/groupsock/include" &&
_live=yes && break
done
if test "$_live" != yes ; then
ld_tmp="-lliveMedia -lgroupsock -lUsageEnvironment -lBasicUsageEnvironment -lstdc++"
if cxx_check -I/usr/include/liveMedia -I/usr/include/UsageEnvironment -I/usr/include/groupsock $ld_tmp; then
_live_dist=yes
fi
fi
fi
if test "$_live" = yes && test "$networking" = yes; then
test $_livelibdir && res_comment="using $_livelibdir"
def_live='#define CONFIG_LIVE555 1'
inputmodules="live555 $inputmodules"
elif test "$_live_dist" = yes && test "$networking" = yes; then
res_comment="using distribution version"
_live="yes"
def_live='#define CONFIG_LIVE555 1'
extra_ldflags="$extra_ldflags $ld_tmp"
extra_cxxflags="-I/usr/include/liveMedia -I/usr/include/UsageEnvironment -I/usr/include/BasicUsageEnvironment -I/usr/include/groupsock"
inputmodules="live555 $inputmodules"
else
_live=no
def_live='#undef CONFIG_LIVE555'
noinputmodules="live555 $noinputmodules"
fi
echores "$_live"
# Test with > against Libav 0.8 versions which will NOT work rather than
# specify minimum version, to allow (future) point releases to possibly work.
@ -3551,7 +3496,6 @@ LIBSMBCLIENT = $_smb
LIBQUVI = $_libquvi
LIBTHEORA = $_theora
LIRC = $_lirc
LIVE555 = $_live
MACOSX_FINDER = $_macosx_finder
MNG = $_mng
MPG123 = $_mpg123
@ -3772,7 +3716,6 @@ $def_ftp
$def_inet6
$def_inet_aton
$def_inet_pton
$def_live
$def_nemesi
$def_networking
$def_smb

View File

@ -1,733 +0,0 @@
/*
* routines (with C-linkage) that interface between MPlayer
* and the "LIVE555 Streaming Media" libraries
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define RTSPCLIENT_SYNCHRONOUS_INTERFACE 1
extern "C" {
// on MinGW, we must include windows.h before the things it conflicts
#ifdef __MINGW32__ // with. they are each protected from
#include <windows.h> // windows.h, but not the other way around.
#endif
#include "demux_rtp.h"
#include "stream/stream.h"
#include "stheader.h"
#include "options.h"
#include "config.h"
}
#include "demux_rtp_internal.h"
#include "BasicUsageEnvironment.hh"
#include "liveMedia.hh"
#include "GroupsockHelper.hh"
#include <unistd.h>
// A data structure representing input data for each stream:
// one ReadBufferQueue exists per elementary stream (audio or video).
// Despite the name, the "queue" normally holds only the single packet
// currently being filled ('dp'); the OS socket buffer does the real
// queueing.  A small pending list exists for packets that were read
// ahead (e.g. during FPS probing) but not yet delivered.
class ReadBufferQueue {
public:
  ReadBufferQueue(MediaSubsession* subsession, demuxer_t* demuxer,
                  char const* tag);
  virtual ~ReadBufferQueue();

  FramedSource* readSource() const { return fReadSource; }
  RTPSource* rtpSource() const { return fRTPSource; }
  demuxer_t* ourDemuxer() const { return fOurDemuxer; }
  char const* tag() const { return fTag; }

  char blockingFlag; // used to implement synchronous reads
                     // (set non-zero by the read callback to exit doEventLoop())

  // For A/V synchronization:
  Boolean prevPacketWasSynchronized;
  float prevPacketPTS;
  ReadBufferQueue** otherQueue; // the opposite (audio<->video) queue, for pts comparison

  // The 'queue' actually consists of just a single "demux_packet_t"
  // (because the underlying OS does the actual queueing/buffering):
  demux_packet_t* dp;

  // However, we sometimes inspect buffers before delivering them.
  // For this, we maintain a queue of pending buffers:
  void savePendingBuffer(demux_packet_t* dp);
  demux_packet_t* getPendingBuffer();

  // For H264 over rtsp using AVParser, the next packet has to be saved
  demux_packet_t* nextpacket;

private:
  demux_packet_t* pendingDPHead; // singly-linked via demux_packet_t::next
  demux_packet_t* pendingDPTail;
  FramedSource* fReadSource;
  RTPSource* fRTPSource;
  demuxer_t* fOurDemuxer;
  char const* fTag; // used for debugging (strdup'd; freed in the destructor)
};
// A structure of RTP-specific state, kept so that we can cleanly
// reclaim it:
struct RTPState {
  char const* sdpDescription;        // SDP text; owned (released with delete[] in demux_close_rtp)
  RTSPClient* rtspClient;            // non-NULL when the session came from a rtsp:// URL
  SIPClient* sipClient;              // non-NULL when the session came from a SIP URL
  MediaSession* mediaSession;
  ReadBufferQueue* audioBufferQueue; // NULL when the session has no audio subsession
  ReadBufferQueue* videoBufferQueue; // NULL when the session has no video subsession
  unsigned flags;                    // RTPSTATE_IS_* bits set during codec init
  struct timeval firstSyncTime;      // first RTCP-synchronized presentation time (pts origin)
};
extern "C" char* network_username;
extern "C" char* network_password;
// Issue a RTSP "DESCRIBE" for 'url' and return the SDP description,
// authenticating with the global network credentials when a user name
// has been supplied (missing password is treated as the empty string).
static char* openURL_rtsp(RTSPClient* client, char const* url) {
  if (network_username == NULL)
    return client->describeURL(url);

  char const* password = (network_password != NULL) ? network_password : "";
  return client->describeWithPassword(url, network_username, password);
}
// Send a SIP INVITE for 'url' and return the resulting SDP description,
// authenticating with the global network credentials when a user name
// has been supplied (missing password is treated as the empty string).
static char* openURL_sip(SIPClient* client, char const* url) {
  if (network_username == NULL)
    return client->invite(url);

  char const* password = (network_password != NULL) ? network_password : "";
  return client->inviteWithPassword(url, network_username, password);
}
#ifdef CONFIG_LIBNEMESI
extern int rtsp_transport_tcp;
extern int rtsp_transport_http;
#else
int rtsp_transport_tcp = 0;
int rtsp_transport_http = 0;
#endif
extern int rtsp_port;
extern AVCodecContext *avcctx;
// Open a LIVE555-backed RTP demuxer.  The session is initiated either
// from an SDP description already stored in stream->priv, or from a
// RTSP/SIP URL.  On success the created RTPState is stored in
// demuxer->priv and the (possibly re-wrapped) demuxer is returned;
// on any failure NULL is returned.
// NOTE(review): the early 'break' error paths leak the scheduler/env
// (and a created RTSP/SIP client) — nothing reclaims them when
// success stays False.
extern "C" demuxer_t* demux_open_rtp(demuxer_t* demuxer) {
  struct MPOpts *opts = demuxer->opts;
  Boolean success = False;
  do {
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    if (scheduler == NULL) break;
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
    if (env == NULL) break;
    RTSPClient* rtspClient = NULL;
    SIPClient* sipClient = NULL;

    if (demuxer == NULL || demuxer->stream == NULL) break; // shouldn't happen
    demuxer->stream->eof = 0; // just in case

    // Look at the stream's 'priv' field to see if we were initiated
    // via a SDP description:
    char* sdpDescription = (char*)(demuxer->stream->priv);
    if (sdpDescription == NULL) {
      // We weren't given a SDP description directly, so assume that
      // we were given a RTSP or SIP URL:
      char const* protocol = demuxer->stream->streaming_ctrl->url->protocol;
      char const* url = demuxer->stream->streaming_ctrl->url->url;
      extern int verbose;
      if (strcmp(protocol, "rtsp") == 0) {
        // -rtsp-stream-over-http: reuse the flag to carry the HTTP
        // tunnelling port, and force TCP transport.
        if (rtsp_transport_http == 1) {
          rtsp_transport_http = demuxer->stream->streaming_ctrl->url->port;
          rtsp_transport_tcp = 1;
        }
        rtspClient = RTSPClient::createNew(*env, verbose, "MPlayer", rtsp_transport_http);
        if (rtspClient == NULL) {
          fprintf(stderr, "Failed to create RTSP client: %s\n",
                  env->getResultMsg());
          break;
        }
        sdpDescription = openURL_rtsp(rtspClient, url);
      } else { // SIP
        unsigned char desiredAudioType = 0; // PCMU (use 3 for GSM)
        sipClient = SIPClient::createNew(*env, desiredAudioType, NULL,
                                         verbose, "MPlayer");
        if (sipClient == NULL) {
          fprintf(stderr, "Failed to create SIP client: %s\n",
                  env->getResultMsg());
          break;
        }
        sipClient->setClientStartPortNum(8000);
        sdpDescription = openURL_sip(sipClient, url);
      }
      if (sdpDescription == NULL) {
        fprintf(stderr, "Failed to get a SDP description from URL \"%s\": %s\n",
                url, env->getResultMsg());
        break;
      }
    }

    // Now that we have a SDP description, create a MediaSession from it:
    MediaSession* mediaSession = MediaSession::createNew(*env, sdpDescription);
    if (mediaSession == NULL) break;

    // Create a 'RTPState' structure containing the state that we just created,
    // and store it in the demuxer's 'priv' field, for future reference:
    RTPState* rtpState = new RTPState;
    rtpState->sdpDescription = sdpDescription;
    rtpState->rtspClient = rtspClient;
    rtpState->sipClient = sipClient;
    rtpState->mediaSession = mediaSession;
    rtpState->audioBufferQueue = rtpState->videoBufferQueue = NULL;
    rtpState->flags = 0;
    rtpState->firstSyncTime.tv_sec = rtpState->firstSyncTime.tv_usec = 0;
    demuxer->priv = rtpState;

    int audiofound = 0, videofound = 0;
    // Create RTP receivers (sources) for each subsession:
    MediaSubsessionIterator iter(*mediaSession);
    MediaSubsession* subsession;
    unsigned desiredReceiveBufferSize;
    while ((subsession = iter.next()) != NULL) {
      // Ignore any subsession that's not audio or video
      // (only the first of each medium is used):
      if (strcmp(subsession->mediumName(), "audio") == 0) {
        if (audiofound) {
          fprintf(stderr, "Additional subsession \"audio/%s\" skipped\n", subsession->codecName());
          continue;
        }
        desiredReceiveBufferSize = 100000;
      } else if (strcmp(subsession->mediumName(), "video") == 0) {
        if (videofound) {
          fprintf(stderr, "Additional subsession \"video/%s\" skipped\n", subsession->codecName());
          continue;
        }
        desiredReceiveBufferSize = 2000000;
      } else {
        continue;
      }

      if (rtsp_port)
        subsession->setClientPortNum (rtsp_port);

      if (!subsession->initiate()) {
        fprintf(stderr, "Failed to initiate \"%s/%s\" RTP subsession: %s\n", subsession->mediumName(), subsession->codecName(), env->getResultMsg());
      } else {
        fprintf(stderr, "Initiated \"%s/%s\" RTP subsession on port %d\n", subsession->mediumName(), subsession->codecName(), subsession->clientPortNum());

        // Set the OS's socket receive buffer sufficiently large to avoid
        // incoming packets getting dropped between successive reads from this
        // subsession's demuxer.  Depending on the bitrate(s) that you expect,
        // you may wish to tweak the "desiredReceiveBufferSize" values above.
        int rtpSocketNum = subsession->rtpSource()->RTPgs()->socketNum();
        int receiveBufferSize
          = increaseReceiveBufferTo(*env, rtpSocketNum,
                                    desiredReceiveBufferSize);
        if (verbose > 0) {
          fprintf(stderr, "Increased %s socket receive buffer to %d bytes \n",
                  subsession->mediumName(), receiveBufferSize);
        }

        if (rtspClient != NULL) {
          // Issue a RTSP "SETUP" command on the chosen subsession:
          if (!rtspClient->setupMediaSubsession(*subsession, False,
                                                rtsp_transport_tcp)) break;
          if (!strcmp(subsession->mediumName(), "audio"))
            audiofound = 1;
          if (!strcmp(subsession->mediumName(), "video"))
            videofound = 1;
        }
      }
    }

    if (rtspClient != NULL) {
      // Issue a RTSP aggregate "PLAY" command on the whole session:
      if (!rtspClient->playMediaSession(*mediaSession)) break;
    } else if (sipClient != NULL) {
      sipClient->sendACK(); // to start the stream flowing
    }

    // Now that the session is ready to be read, do additional
    // MPlayer codec-specific initialization on each subsession:
    iter.reset();
    while ((subsession = iter.next()) != NULL) {
      if (subsession->readSource() == NULL) continue; // not reading this

      unsigned flags = 0;
      if (strcmp(subsession->mediumName(), "audio") == 0) {
        rtpState->audioBufferQueue
          = new ReadBufferQueue(subsession, demuxer, "audio");
        rtpState->audioBufferQueue->otherQueue = &(rtpState->videoBufferQueue);
        rtpCodecInitialize_audio(demuxer, subsession, flags);
      } else if (strcmp(subsession->mediumName(), "video") == 0) {
        rtpState->videoBufferQueue
          = new ReadBufferQueue(subsession, demuxer, "video");
        rtpState->videoBufferQueue->otherQueue = &(rtpState->audioBufferQueue);
        rtpCodecInitialize_video(demuxer, subsession, flags);
      }
      rtpState->flags |= flags;
    }
    success = True;
  } while (0);
  if (!success) return NULL; // an error occurred

  // Hack: If audio and video are demuxed together on a single RTP stream,
  // then create a new "demuxer_t" structure to allow the higher-level
  // code to recognize this:
  if (demux_is_multiplexed_rtp_stream(demuxer)) {
    stream_t* s = new_ds_stream(demuxer->video);
    demuxer_t* od = demux_open(opts, s, DEMUXER_TYPE_UNKNOWN,
                               opts->audio_id, opts->video_id, opts->sub_id,
                               NULL);
    demuxer = new_demuxers_demuxer(od, od, od);
  }
  return demuxer;
}
// Report whether this RTP demuxer carries MPEG-1/2 video
// (the flag is set during codec initialization at open time).
extern "C" int demux_is_mpeg_rtp_stream(demuxer_t* demuxer) {
  // The RTP state was stashed in the demuxer's 'priv' field:
  RTPState* state = (RTPState*)demuxer->priv;
  return (state->flags & RTPSTATE_IS_MPEG12_VIDEO) ? 1 : 0;
}
// Report whether this RTP demuxer carries multiplexed audio+video in a
// single stream (so higher-level code must demux it again).
extern "C" int demux_is_multiplexed_rtp_stream(demuxer_t* demuxer) {
  // The RTP state was stashed in the demuxer's 'priv' field:
  RTPState* state = (RTPState*)demuxer->priv;
  return (state->flags & RTPSTATE_IS_MULTIPLEXED) ? 1 : 0;
}
static demux_packet_t* getBuffer(demuxer_t* demuxer, demux_stream_t* ds,
Boolean mustGetNewData,
float& ptsBehind); // forward
// Demuxer fill_buffer entry point: fetch one packet for stream 'ds' and
// queue it on the demux stream.  Returns 1 on success, 0 on EOF/error.
extern "C" int demux_rtp_fill_buffer(demuxer_t* demuxer, demux_stream_t* ds) {
  // Get a filled-in "demux_packet" from the RTP source, and deliver it.
  // Note that this is called as a synchronous read operation, so it needs
  // to block in the (hopefully infrequent) case where no packet is
  // immediately available.
  while (1) {
    float ptsBehind;
    demux_packet_t* dp = getBuffer(demuxer, ds, False, ptsBehind); // blocking
    if (dp == NULL) return 0;

    if (demuxer->stream->eof) return 0; // source stream has closed down

    // Before using this packet, check to make sure that its presentation
    // time is not far behind the other stream (if any).  If it is,
    // then we discard this packet, and get another instead.  (The rest of
    // MPlayer doesn't always do a good job of synchronizing when the
    // audio and video streams get this far apart.)
    // (We don't do this when streaming over TCP, because then the audio and
    //  video streams are interleaved.)
    // (Also, if the stream is *excessively* far behind, then we allow
    //  the packet, because in this case it probably means that there was
    //  an error in the source's timestamp synchronization.)
    const float ptsBehindThreshold = 1.0; // seconds
    const float ptsBehindLimit = 60.0; // seconds
    if (ptsBehind < ptsBehindThreshold ||
        ptsBehind > ptsBehindLimit ||
        rtsp_transport_tcp) { // packet's OK
      ds_add_packet(ds, dp);
      break;
    }

#ifdef DEBUG_PRINT_DISCARDED_PACKETS
    RTPState* rtpState = (RTPState*)(demuxer->priv);
    ReadBufferQueue* bufferQueue = ds == demuxer->video ? rtpState->videoBufferQueue : rtpState->audioBufferQueue;
    fprintf(stderr, "Discarding %s packet (%fs behind)\n", bufferQueue->tag(), ptsBehind);
#endif
    free_demux_packet(dp); // give back this packet, and get another one
  }
  return 1;
}
// Block until a fresh RTP packet is available for 'ds' and expose its
// payload/pts to the caller WITHOUT consuming it: the packet is saved on
// the queue's pending list (via getBuffer's mustGetNewData path), so the
// returned pointers stay valid until the packet is later delivered.
// Returns False on EOF/error.
Boolean awaitRTPPacket(demuxer_t* demuxer, demux_stream_t* ds,
                       unsigned char*& packetData, unsigned& packetDataLen,
                       float& pts) {
  // Similar to "demux_rtp_fill_buffer()", except that the "demux_packet"
  // is not delivered to the "demux_stream".
  float ptsBehind;
  demux_packet_t* dp = getBuffer(demuxer, ds, True, ptsBehind); // blocking
  if (dp == NULL) return False;

  packetData = dp->buffer;
  packetDataLen = dp->len;
  pts = dp->pts;

  return True;
}
static void teardownRTSPorSIPSession(RTPState* rtpState); // forward
// Close the RTP demuxer: tear down the RTSP/SIP session and reclaim all
// state created by demux_open_rtp() — the buffer queues, the SDP text,
// the LIVE555 media objects, and finally the usage environment and its
// task scheduler.
extern "C" void demux_close_rtp(demuxer_t* demuxer) {
  // Reclaim all RTP-related state:

  // Get the RTP state that was stored in the demuxer's 'priv' field:
  RTPState* rtpState = (RTPState*)(demuxer->priv);
  if (rtpState == NULL) return;

  teardownRTSPorSIPSession(rtpState);

  UsageEnvironment* env = NULL;
  TaskScheduler* scheduler = NULL;
  if (rtpState->mediaSession != NULL) {
    env = &(rtpState->mediaSession->envir());
    scheduler = &(env->taskScheduler());
  }
  Medium::close(rtpState->mediaSession);
  Medium::close(rtpState->rtspClient);
  Medium::close(rtpState->sipClient);

  delete rtpState->audioBufferQueue;
  delete rtpState->videoBufferQueue;
  delete[] rtpState->sdpDescription;
  delete rtpState;
  av_freep(&avcctx);

  // BUGFIX: 'env' remains NULL when there was no mediaSession (the code
  // above already guards for that case); the original called
  // env->reclaim() unconditionally and dereferenced a NULL pointer.
  // 'delete scheduler' is NULL-safe as-is.
  if (env != NULL) env->reclaim();
  delete scheduler;
}
////////// Extra routines that help implement the above interface functions:
#define MAX_RTP_FRAME_SIZE 5000000
// >= the largest conceivable frame composed from one or more RTP packets
// Completion callback handed to FramedSource::getNextFrame(): LIVE555
// calls this once a frame has been written into the queue's current
// packet ('dp', filled starting at offset 'headersize').  It finalizes
// the packet (start-code/AMR header, size, pts) and then sets
// blockingFlag to break the caller out of doEventLoop().
static void afterReading(void* clientData, unsigned frameSize,
                         unsigned /*numTruncatedBytes*/,
                         struct timeval presentationTime,
                         unsigned /*durationInMicroseconds*/) {
  int headersize = 0;
  if (frameSize >= MAX_RTP_FRAME_SIZE) {
    fprintf(stderr, "Saw an input frame too large (>=%d). Increase MAX_RTP_FRAME_SIZE in \"demux_rtp.cpp\".\n",
            MAX_RTP_FRAME_SIZE);
  }
  ReadBufferQueue* bufferQueue = (ReadBufferQueue*)clientData;
  demuxer_t* demuxer = bufferQueue->ourDemuxer();
  RTPState* rtpState = (RTPState*)(demuxer->priv);

  if (frameSize > 0) demuxer->stream->eof = 0;

  demux_packet_t* dp = bufferQueue->dp;

  // The reader left room at the front of the buffer for a per-codec
  // header: 1 byte for AMR, a 00 00 01 start code for H.264 video.
  if (bufferQueue->readSource()->isAMRAudioSource())
    headersize = 1;
  else if (bufferQueue == rtpState->videoBufferQueue &&
           ((sh_video_t*)demuxer->video->sh)->format == mmioFOURCC('H','2','6','4')) {
    dp->buffer[0]=0x00;
    dp->buffer[1]=0x00;
    dp->buffer[2]=0x01;
    headersize = 3;
  }

  resize_demux_packet(dp, frameSize + headersize);

  // Set the packet's presentation time stamp, depending on whether or
  // not our RTP source's timestamps have been synchronized yet:
  Boolean hasBeenSynchronized
    = bufferQueue->rtpSource()->hasBeenSynchronizedUsingRTCP();
  if (hasBeenSynchronized) {
    if (verbose > 0 && !bufferQueue->prevPacketWasSynchronized) {
      fprintf(stderr, "%s stream has been synchronized using RTCP \n",
              bufferQueue->tag());
    }
    struct timeval* fst = &(rtpState->firstSyncTime); // abbrev
    if (fst->tv_sec == 0 && fst->tv_usec == 0) {
      *fst = presentationTime;
    }

    // For the "pts" field, use the time differential from the first
    // synchronized time, rather than absolute time, in order to avoid
    // round-off errors when converting to a float:
    dp->pts = presentationTime.tv_sec - fst->tv_sec
      + (presentationTime.tv_usec - fst->tv_usec)/1000000.0;
    bufferQueue->prevPacketPTS = dp->pts;
  } else {
    if (verbose > 0 && bufferQueue->prevPacketWasSynchronized) {
      fprintf(stderr, "%s stream is no longer RTCP-synchronized \n",
              bufferQueue->tag());
    }

    // use the previous packet's "pts" once again:
    dp->pts = bufferQueue->prevPacketPTS;
  }
  bufferQueue->prevPacketWasSynchronized = hasBeenSynchronized;

  dp->pos = demuxer->filepos;
  demuxer->filepos += frameSize + headersize;

  // Signal any pending 'doEventLoop()' call on this queue:
  bufferQueue->blockingFlag = ~0;
}
static void onSourceClosure(void* clientData) {
ReadBufferQueue* bufferQueue = (ReadBufferQueue*)clientData;
demuxer_t* demuxer = bufferQueue->ourDemuxer();
demuxer->stream->eof = 1;
// Signal any pending 'doEventLoop()' call on this queue:
bufferQueue->blockingFlag = ~0;
}
// Synchronously read the next packet for 'ds'.  Picks the right buffer
// queue, schedules a LIVE555 read, and blocks in doEventLoop() until the
// afterReading/onSourceClosure callbacks fire (with a watchdog timeout).
// For H.264, frames are additionally pushed through the lavc parser so
// complete access units are emitted.  'ptsBehind' reports how far this
// stream's pts lags the other stream's.  When 'mustGetNewData' is set the
// returned packet is also saved on the pending list (see awaitRTPPacket).
// Returns NULL on EOF/error.
static demux_packet_t* getBuffer(demuxer_t* demuxer, demux_stream_t* ds,
                                 Boolean mustGetNewData,
                                 float& ptsBehind) {
  // Begin by finding the buffer queue that we want to read from:
  // (Get this from the RTP state, which we stored in
  //  the demuxer's 'priv' field)
  RTPState* rtpState = (RTPState*)(demuxer->priv);
  ReadBufferQueue* bufferQueue = NULL;
  int headersize = 0;
  int waitboth = 0;
  TaskToken task, task2;

  if (demuxer->stream->eof) return NULL;

  if (ds == demuxer->video) {
    bufferQueue = rtpState->audioBufferQueue;
    // HACK: for the latest versions we must also receive audio
    // when probing for video FPS, otherwise the stream just hangs
    // and times out
    if (mustGetNewData &&
        bufferQueue &&
        bufferQueue->readSource() &&
        !bufferQueue->nextpacket) {
      headersize = bufferQueue->readSource()->isAMRAudioSource() ? 1 : 0;
      demux_packet_t *dp = new_demux_packet(MAX_RTP_FRAME_SIZE);
      bufferQueue->dp = dp;
      bufferQueue->blockingFlag = 0;
      // Kick off a parallel audio read; waited for below (waitboth).
      bufferQueue->readSource()->getNextFrame(
        &dp->buffer[headersize], MAX_RTP_FRAME_SIZE - headersize,
        afterReading, bufferQueue,
        onSourceClosure, bufferQueue);
      task2 = bufferQueue->readSource()->envir().taskScheduler().
        scheduleDelayedTask(10000000, onSourceClosure, bufferQueue);
      waitboth = 1;
    }
    bufferQueue = rtpState->videoBufferQueue;
    if (((sh_video_t*)ds->sh)->format == mmioFOURCC('H','2','6','4'))
      headersize = 3; // room for the 00 00 01 start code
  } else if (ds == demuxer->audio) {
    bufferQueue = rtpState->audioBufferQueue;
    if (bufferQueue->readSource()->isAMRAudioSource())
      headersize = 1; // room for the AMR frame header byte
  } else {
    fprintf(stderr, "(demux_rtp)getBuffer: internal error: unknown stream\n");
    return NULL;
  }

  if (bufferQueue == NULL || bufferQueue->readSource() == NULL) {
    fprintf(stderr, "(demux_rtp)getBuffer failed: no appropriate RTP subsession has been set up\n");
    return NULL;
  }

  demux_packet_t* dp = NULL;
  if (!mustGetNewData) {
    // Check whether we have a previously-saved buffer that we can use:
    dp = bufferQueue->getPendingBuffer();
    if (dp != NULL) {
      ptsBehind = 0.0; // so that we always accept this data
      return dp;
    }
  }

  // Allocate a new packet buffer, and arrange to read into it:
  // (unless a parsed leftover packet from the previous call is queued)
  if (!bufferQueue->nextpacket) {
    dp = new_demux_packet(MAX_RTP_FRAME_SIZE);
    bufferQueue->dp = dp;
    if (dp == NULL) return NULL;
  }

  extern AVCodecParserContext * h264parserctx;
  int consumed, poutbuf_size = 1;
  const uint8_t *poutbuf = NULL;
  float lastpts = 0.0;

  do {
    if (!bufferQueue->nextpacket) {
      // Schedule the read operation:
      bufferQueue->blockingFlag = 0;
      bufferQueue->readSource()->getNextFrame(&dp->buffer[headersize], MAX_RTP_FRAME_SIZE - headersize,
                                              afterReading, bufferQueue,
                                              onSourceClosure, bufferQueue);

      // Block ourselves until data becomes available:
      TaskScheduler& scheduler
        = bufferQueue->readSource()->envir().taskScheduler();
      int delay = 10000000; // 10s watchdog; shortened near end of stream
      if (bufferQueue->prevPacketPTS * 1.05 > rtpState->mediaSession->playEndTime())
        delay /= 10;
      task = scheduler.scheduleDelayedTask(delay, onSourceClosure, bufferQueue);
      scheduler.doEventLoop(&bufferQueue->blockingFlag);
      scheduler.unscheduleDelayedTask(task);
      if (waitboth) {
        // Also wait for the parallel audio read started above:
        scheduler.doEventLoop(&rtpState->audioBufferQueue->blockingFlag);
        scheduler.unscheduleDelayedTask(task2);
      }
      if (demuxer->stream->eof) {
        free_demux_packet(dp);
        return NULL;
      }

      if (headersize == 1) // amr
        dp->buffer[0] =
          ((AMRAudioSource*)bufferQueue->readSource())->lastFrameHeader();
    } else {
      // Consume the packet saved by a previous parser round:
      bufferQueue->dp = dp = bufferQueue->nextpacket;
      bufferQueue->nextpacket = NULL;
    }

    if (headersize == 3 && h264parserctx) { // h264
      consumed = h264parserctx->parser->parser_parse(h264parserctx,
                                                     avcctx,
                                                     &poutbuf, &poutbuf_size,
                                                     dp->buffer, dp->len);

      if (!consumed && !poutbuf_size)
        return NULL;

      if (!poutbuf_size) {
        // Parser needs more input: remember the pts and read another frame.
        lastpts=dp->pts;
        free_demux_packet(dp);
        bufferQueue->dp = dp = new_demux_packet(MAX_RTP_FRAME_SIZE);
      } else {
        // Parser emitted a complete unit; keep the unconsumed input
        // packet for the next call and hand out the parsed output.
        bufferQueue->nextpacket = dp;
        bufferQueue->dp = dp = new_demux_packet(poutbuf_size);
        memcpy(dp->buffer, poutbuf, poutbuf_size);
        dp->pts=lastpts;
      }
    }
  } while (!poutbuf_size);

  // Set the "ptsBehind" result parameter:
  if (bufferQueue->prevPacketPTS != 0.0
      && bufferQueue->prevPacketWasSynchronized
      && *(bufferQueue->otherQueue) != NULL
      && (*(bufferQueue->otherQueue))->prevPacketPTS != 0.0
      && (*(bufferQueue->otherQueue))->prevPacketWasSynchronized) {
    ptsBehind = (*(bufferQueue->otherQueue))->prevPacketPTS
      - bufferQueue->prevPacketPTS;
  } else {
    ptsBehind = 0.0;
  }

  if (mustGetNewData) {
    // Save this buffer for future reads:
    bufferQueue->savePendingBuffer(dp);
  }

  return dp;
}
// Politely end the session: RTSP gets a TEARDOWN, SIP gets a BYE.
// A missing media session means nothing was ever set up — do nothing.
static void teardownRTSPorSIPSession(RTPState* rtpState) {
  MediaSession* session = rtpState->mediaSession;
  if (session == NULL)
    return;

  if (rtpState->rtspClient != NULL)
    rtpState->rtspClient->teardownMediaSession(*session);
  else if (rtpState->sipClient != NULL)
    rtpState->sipClient->sendBYE();
}
////////// "ReadBuffer" and "ReadBufferQueue" implementation:
// Construct a queue bound to one subsession of 'demuxer'.  The read and
// RTP sources are cached from the subsession (NULL-safe); 'tag' is
// duplicated with strdup() and released with free() in the destructor.
ReadBufferQueue::ReadBufferQueue(MediaSubsession* subsession,
                                 demuxer_t* demuxer, char const* tag)
  : prevPacketWasSynchronized(False), prevPacketPTS(0.0), otherQueue(NULL),
    dp(NULL), nextpacket(NULL),
    pendingDPHead(NULL), pendingDPTail(NULL),
    fReadSource(subsession == NULL ? NULL : subsession->readSource()),
    fRTPSource(subsession == NULL ? NULL : subsession->rtpSource()),
    fOurDemuxer(demuxer), fTag(strdup(tag)) {
}
ReadBufferQueue::~ReadBufferQueue() {
  // Release the strdup'd debugging tag:
  free((void *)fTag);

  // Release any packets that were queued but never handed to the demuxer:
  for (demux_packet_t* cur = pendingDPHead; cur != NULL; ) {
    demux_packet_t* next = cur->next;
    cur->next = NULL;
    free_demux_packet(cur);
    cur = next;
  }
}
void ReadBufferQueue::savePendingBuffer(demux_packet_t* dp) {
  // Keep this buffer around, until MPlayer asks for it later:
  // append it to the tail of the singly-linked pending list.
  dp->next = NULL;
  if (pendingDPTail != NULL) {
    pendingDPTail->next = dp;
    pendingDPTail = dp;
  } else {
    pendingDPHead = pendingDPTail = dp;
  }
}
// Pop and return the oldest pending packet, or NULL if none is queued.
demux_packet_t* ReadBufferQueue::getPendingBuffer() {
  demux_packet_t* head = pendingDPHead;
  if (head == NULL)
    return NULL;

  pendingDPHead = head->next;
  if (pendingDPHead == NULL)
    pendingDPTail = NULL;
  head->next = NULL;
  return head;
}
static int demux_rtp_control(struct demuxer *demuxer, int cmd, void *arg) {
double endpts = ((RTPState*)demuxer->priv)->mediaSession->playEndTime();
switch(cmd) {
case DEMUXER_CTRL_GET_TIME_LENGTH:
if (endpts <= 0)
return DEMUXER_CTRL_DONTKNOW;
*((double *)arg) = endpts;
return DEMUXER_CTRL_OK;
case DEMUXER_CTRL_GET_PERCENT_POS:
if (endpts <= 0)
return DEMUXER_CTRL_DONTKNOW;
*((int *)arg) = (int)(((RTPState*)demuxer->priv)->videoBufferQueue->prevPacketPTS*100/endpts);
return DEMUXER_CTRL_OK;
default:
return DEMUXER_CTRL_NOTIMPL;
}
}
// Demuxer descriptor for the LIVE555 RTP demuxer.
demuxer_desc_t demuxer_desc_rtp = {
  "LIVE555 RTP demuxer",
  "live555",
  "",
  "Ross Finlayson",
  "requires LIVE555 Streaming Media library",
  DEMUXER_TYPE_RTP,
  0, // no autodetect
  NULL,
  demux_rtp_fill_buffer, // synchronous packet read
  demux_open_rtp,        // session setup (RTSP/SIP/SDP)
  demux_close_rtp,       // session teardown + state reclaim
  NULL,                  // no seek (live stream)
  demux_rtp_control      // duration / percent-position queries
};

View File

@ -1,43 +0,0 @@
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MPLAYER_DEMUX_RTP_H
#define MPLAYER_DEMUX_RTP_H

#include <stdlib.h>
#include <stdio.h>

#include "demuxer.h"

// C-linkage interface between MPlayer's demuxer layer and the C++
// LIVE555-based implementation (demux_rtp.cpp), which defines all of
// the functions declared below with extern "C".

// Open a RTP demuxer (which was initiated either from a SDP file,
// or from a RTSP URL):
demuxer_t* demux_open_rtp(demuxer_t* demuxer);

// Test whether a RTP demuxer is for a MPEG stream:
int demux_is_mpeg_rtp_stream(demuxer_t* demuxer);

// Test whether a RTP demuxer contains combined (multiplexed)
// audio+video (and so needs to be demuxed by higher-level code):
int demux_is_multiplexed_rtp_stream(demuxer_t* demuxer);

// Read from a RTP demuxer (blocks until a packet is available):
int demux_rtp_fill_buffer(demuxer_t *demux, demux_stream_t* ds);

// Close a RTP demuxer
void demux_close_rtp(demuxer_t* demuxer);

#endif /* MPLAYER_DEMUX_RTP_H */

View File

@ -1,426 +0,0 @@
/*
* codec-specific routines used to interface between MPlayer
* and the "LIVE555 Streaming Media" libraries
*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "demux_rtp_internal.h"
extern "C" {
#include <limits.h>
#include <math.h>
#include "stheader.h"
#include "libavutil/base64.h"
}
AVCodecParserContext * h264parserctx;
AVCodecContext *avcctx;
// Copied from vlc
// Convert the SDP "sprop-parameter-sets" attribute (comma-separated
// base64 records, typically SPS then PPS) into an Annex-B byte stream:
// each decoded record is prefixed with a 00 00 01 start code.
// Returns a new[]-allocated buffer (length stored in 'configSize'),
// or NULL if the string is missing or empty.
static unsigned char* parseH264ConfigStr( char const* configStr,
                                          unsigned int& configSize )
{
  char *dup, *psz;
  int i, i_records = 1;

  configSize = 0;

  if( configStr == NULL || *configStr == '\0' )
    return NULL;

  // BUGFIX: size the output from the *whole* attribute.  The original
  // computed strlen(dup) after the commas had been replaced by NULs,
  // which measures only the first record and could under-allocate when
  // a later record is longer than the first.
  size_t totalLen = strlen( configStr );

  psz = dup = strdup( configStr );

  /* Count the number of comma's, splitting the records in place */
  for( psz = dup; *psz != '\0'; ++psz )
  {
    if( *psz == ',')
    {
      ++i_records;
      *psz = '\0';
    }
  }

  // 3 start-code bytes per record plus the base64-decoded payload
  // (at most 3/4 of the encoded length): 5 * totalLen is a safe bound.
  unsigned char *cfg = new unsigned char[5 * totalLen];
  psz = dup;
  for( i = 0; i < i_records; i++ )
  {
    cfg[configSize++] = 0x00;
    cfg[configSize++] = 0x00;
    cfg[configSize++] = 0x01;
    configSize += av_base64_decode( (uint8_t*)&cfg[configSize],
                                    psz,
                                    5 * totalLen - configSize );
    psz += strlen(psz)+1;
  }

  free( dup );
  return cfg;
}
static void
needVideoFrameRate(demuxer_t* demuxer, MediaSubsession* subsession); // forward
static Boolean
parseQTState_video(QuickTimeGenericRTPSource::QTState const& qtState,
unsigned& fourcc); // forward
static Boolean
parseQTState_audio(QuickTimeGenericRTPSource::QTState const& qtState,
unsigned& fourcc, unsigned& numChannels); // forward
// Append codec extradata after a BITMAPINFOHEADER, growing the heap
// allocation and updating biSize accordingly.  'extraData' is copied,
// not adopted.  When there is nothing to add, the size would overflow,
// or realloc fails, the original (still valid) header is returned.
static BITMAPINFOHEADER * insertVideoExtradata(BITMAPINFOHEADER *bih,
                                               unsigned char * extraData,
                                               unsigned size)
{
  BITMAPINFOHEADER * original = bih;
  if (!size || size > INT_MAX - sizeof(BITMAPINFOHEADER))
    return bih;
  // 'original' is kept so the old buffer survives a failed realloc:
  bih = (BITMAPINFOHEADER*)realloc(bih, sizeof(BITMAPINFOHEADER) + size);
  if (!bih)
    return original;
  bih->biSize = sizeof(BITMAPINFOHEADER) + size;
  memcpy(bih+1, extraData, size);
  return bih;
}
// Initialize MPlayer's video stream header from a LIVE555 RTP video
// subsession.  The subsession's MIME codec name is mapped to a
// BITMAPINFOHEADER fourcc (plus codec extradata where required), and
// "flags" receives RTPSTATE_* bits for formats needing special demuxing.
// Unknown codec names are reported to stderr but are not fatal.
void rtpCodecInitialize_video(demuxer_t* demuxer,
                              MediaSubsession* subsession,
                              unsigned& flags) {
  flags = 0;
  // Create a dummy video stream header
  // to make the main MPlayer code happy:
  sh_video_t* sh_video = new_sh_video(demuxer,0);
  BITMAPINFOHEADER* bih
    = (BITMAPINFOHEADER*)calloc(1,sizeof(BITMAPINFOHEADER));
  bih->biSize = sizeof(BITMAPINFOHEADER);
  sh_video->bih = bih;
  demux_stream_t* d_video = demuxer->video;
  d_video->sh = sh_video; sh_video->ds = d_video;

  // Map known video MIME types to the BITMAPINFOHEADER parameters
  // that this program uses. (Note that not all types need all
  // of the parameters to be set.)
  if (strcmp(subsession->codecName(), "MPV") == 0) {
    flags |= RTPSTATE_IS_MPEG12_VIDEO;
  } else if (strcmp(subsession->codecName(), "MP1S") == 0 ||
             strcmp(subsession->codecName(), "MP2T") == 0) {
    // MPEG program/transport streams carry audio and video multiplexed
    // within a single RTP session:
    flags |= RTPSTATE_IS_MPEG12_VIDEO|RTPSTATE_IS_MULTIPLEXED;
  } else if (strcmp(subsession->codecName(), "H263") == 0 ||
             strcmp(subsession->codecName(), "H263-2000") == 0 ||
             strcmp(subsession->codecName(), "H263-1998") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','3');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H264") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','4');
    // Recover the SPS/PPS parameter sets from the SDP
    // "sprop-parameter-sets" attribute and attach them to the stream
    // header as Annex-B extradata:
    unsigned int configLen = 0;
    unsigned char* configData
      = parseH264ConfigStr(subsession->fmtp_spropparametersets(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    // Dummy output slots — we only want the parser to absorb the config,
    // not to produce a frame:
    int fooLen;
    const uint8_t* fooData;
    h264parserctx = av_parser_init(CODEC_ID_H264);
    avcctx = avcodec_alloc_context3(NULL);
    // Pass the config to the parser
    h264parserctx->parser->parser_parse(h264parserctx, avcctx,
                         &fooData, &fooLen, configData, configLen);
    delete[] configData;
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H261") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','1');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "JPEG") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('M','J','P','G');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "MP4V-ES") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('m','p','4','v');
    // For the codec to work correctly, it may need a 'VOL Header' to be
    // inserted at the front of the data stream. Construct this from the
    // "config" MIME parameter, which was present (hopefully) in the
    // session's SDP description:
    unsigned configLen;
    unsigned char* configData
      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "X-QT") == 0 ||
             strcmp(subsession->codecName(), "X-QUICKTIME") == 0) {
    // QuickTime generic RTP format, as described in
    // http://developer.apple.com/quicktime/icefloe/dispatch026.html

    // We can't initialize this stream until we've received the first packet
    // that has QuickTime "sdAtom" information in the header. So, keep
    // reading packets until we get one:
    unsigned char* packetData; unsigned packetDataLen; float pts;
    QuickTimeGenericRTPSource* qtRTPSource
      = (QuickTimeGenericRTPSource*)(subsession->rtpSource());
    unsigned fourcc;
    do {
      if (!awaitRTPPacket(demuxer, demuxer->video,
                          packetData, packetDataLen, pts)) {
        return;
      }
    } while (!parseQTState_video(qtRTPSource->qtState, fourcc));

    bih->biCompression = sh_video->format = fourcc;
    bih->biWidth = qtRTPSource->qtState.width;
    bih->biHeight = qtRTPSource->qtState.height;
    // NOTE(review): byte offsets 83 (bit depth) and 86 (first extension
    // atom) index into the QuickTime video sample-description atom per
    // Apple's dispatch026 layout — confirm before changing.
    if (qtRTPSource->qtState.sdAtomSize > 83)
      bih->biBitCount = qtRTPSource->qtState.sdAtom[83];
    uint8_t *pos = (uint8_t*)qtRTPSource->qtState.sdAtom + 86;
    uint8_t *endpos = (uint8_t*)qtRTPSource->qtState.sdAtom
                      + qtRTPSource->qtState.sdAtomSize;
    // Walk the extension atoms looking for codec-specific extradata
    // ("avcC" for H.264, "esds" for MPEG-4, "SMI " for SVQ3):
    while (pos+8 < endpos) {
      unsigned atomLength = pos[0]<<24 | pos[1]<<16 | pos[2]<<8 | pos[3];
      if (atomLength == 0 || atomLength > endpos-pos) break;
      if (((!memcmp(pos+4, "avcC", 4) && fourcc==mmioFOURCC('a','v','c','1')) ||
           !memcmp(pos+4, "esds", 4) ||
           (!memcmp(pos+4, "SMI ", 4) && fourcc==mmioFOURCC('S','V','Q','3'))) &&
          atomLength > 8) {
        sh_video->bih = bih =
          insertVideoExtradata(bih, pos+8, atomLength-8);
        break;
      }
      pos += atomLength;
    }
    needVideoFrameRate(demuxer, subsession);
  } else {
    fprintf(stderr,
            "Unknown MPlayer format code for MIME type \"video/%s\"\n",
            subsession->codecName());
  }
}
// Initialize MPlayer's audio stream header from a LIVE555 RTP audio
// subsession.  The subsession's MIME codec name is mapped to a
// WAVEFORMATEX format tag (plus codec extradata where required); "flags"
// is always cleared — no audio format currently sets RTPSTATE_* bits.
// Unknown codec names are reported to stderr but are not fatal.
void rtpCodecInitialize_audio(demuxer_t* demuxer,
                              MediaSubsession* subsession,
                              unsigned& flags) {
  flags = 0;
  // Create a dummy audio stream header
  // to make the main MPlayer code happy:
  sh_audio_t* sh_audio = new_sh_audio(demuxer,0);
  WAVEFORMATEX* wf = (WAVEFORMATEX*)calloc(1,sizeof(WAVEFORMATEX));
  sh_audio->wf = wf;
  demux_stream_t* d_audio = demuxer->audio;
  d_audio->sh = sh_audio; sh_audio->ds = d_audio;
  d_audio->id = sh_audio->aid;

  wf->nChannels = subsession->numChannels();

  // Map known audio MIME types to the WAVEFORMATEX parameters
  // that this program uses. (Note that not all types need all
  // of the parameters to be set.)
  wf->nSamplesPerSec
    = subsession->rtpSource()->timestampFrequency(); // by default
  if (strcmp(subsession->codecName(), "MPA") == 0 ||
      strcmp(subsession->codecName(), "MPA-ROBUST") == 0 ||
      strcmp(subsession->codecName(), "X-MP3-DRAFT-00") == 0) {
    wf->wFormatTag = sh_audio->format = 0x55;
      // Note: 0x55 is for layer III, but should work for I,II also
    wf->nSamplesPerSec = 0; // sample rate is deduced from the data
  } else if (strcmp(subsession->codecName(), "AC3") == 0) {
    wf->wFormatTag = sh_audio->format = 0x2000;
    wf->nSamplesPerSec = 0; // sample rate is deduced from the data
  } else if (strcmp(subsession->codecName(), "L16") == 0) {
    // 16-bit big-endian PCM:
    wf->wFormatTag = sh_audio->format = 0x736f7774; // "twos"
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "L8") == 0) {
    // 8-bit PCM:
    wf->wFormatTag = sh_audio->format = 0x20776172; // "raw "
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "PCMU") == 0) {
    // G.711 mu-law:
    wf->wFormatTag = sh_audio->format = 0x7;
    wf->nAvgBytesPerSec = 8000;
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "PCMA") == 0) {
    // G.711 A-law:
    wf->wFormatTag = sh_audio->format = 0x6;
    wf->nAvgBytesPerSec = 8000;
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "AMR") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('s','a','m','r');
  } else if (strcmp(subsession->codecName(), "AMR-WB") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('s','a','w','b');
  } else if (strcmp(subsession->codecName(), "GSM") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('a','g','s','m');
    wf->nAvgBytesPerSec = 1650;
    wf->nBlockAlign = 33;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "QCELP") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('Q','c','l','p');
    wf->nAvgBytesPerSec = 1750;
    wf->nBlockAlign = 35;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "MP4A-LATM") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('m','p','4','a');
    // For the codec to work correctly, it needs "AudioSpecificConfig"
    // data, which is parsed from the "StreamMuxConfig" string that
    // was present (hopefully) in the SDP description:
    unsigned codecdata_len;
    sh_audio->codecdata
      = parseStreamMuxConfigStr(subsession->fmtp_config(),
                                codecdata_len);
    sh_audio->codecdata_len = codecdata_len;
    //faad doesn't understand LATM's data length field, so omit it
    ((MPEG4LATMAudioRTPSource*)subsession->rtpSource())->omitLATMDataLengthField();
  } else if (strcmp(subsession->codecName(), "MPEG4-GENERIC") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('m','p','4','a');
    // For the codec to work correctly, it needs "AudioSpecificConfig"
    // data, which was present (hopefully) in the SDP description:
    unsigned codecdata_len;
    sh_audio->codecdata
      = parseGeneralConfigStr(subsession->fmtp_config(),
                              codecdata_len);
    sh_audio->codecdata_len = codecdata_len;
  } else if (strcmp(subsession->codecName(), "X-QT") == 0 ||
             strcmp(subsession->codecName(), "X-QUICKTIME") == 0) {
    // QuickTime generic RTP format, as described in
    // http://developer.apple.com/quicktime/icefloe/dispatch026.html

    // We can't initialize this stream until we've received the first packet
    // that has QuickTime "sdAtom" information in the header. So, keep
    // reading packets until we get one:
    unsigned char* packetData; unsigned packetDataLen; float pts;
    QuickTimeGenericRTPSource* qtRTPSource
      = (QuickTimeGenericRTPSource*)(subsession->rtpSource());
    unsigned fourcc, numChannels;
    do {
      if (!awaitRTPPacket(demuxer, demuxer->audio,
                          packetData, packetDataLen, pts)) {
        return;
      }
    } while (!parseQTState_audio(qtRTPSource->qtState, fourcc, numChannels));

    wf->wFormatTag = sh_audio->format = fourcc;
    wf->nChannels = numChannels;

    // NOTE(review): byte offsets 27 (sample size), 32/33 (sample rate)
    // and 52 (first extension atom) index into the QuickTime audio
    // sample-description atom per Apple's dispatch026 layout — confirm
    // before changing.
    if (qtRTPSource->qtState.sdAtomSize > 33) {
      wf->wBitsPerSample = qtRTPSource->qtState.sdAtom[27];
      wf->nSamplesPerSec = qtRTPSource->qtState.sdAtom[32]<<8|qtRTPSource->qtState.sdAtom[33];
    }
    uint8_t *pos = (uint8_t*)qtRTPSource->qtState.sdAtom + 52;
    uint8_t *endpos = (uint8_t*)qtRTPSource->qtState.sdAtom
                      + qtRTPSource->qtState.sdAtomSize;
    // Walk the extension atoms looking for the "wave" atom that carries
    // QDM2 codec extradata:
    while (pos+8 < endpos) {
      unsigned atomLength = pos[0]<<24 | pos[1]<<16 | pos[2]<<8 | pos[3];
      if (atomLength == 0 || atomLength > endpos-pos) break;
      if (!memcmp(pos+4, "wave", 4) && fourcc==mmioFOURCC('Q','D','M','2') &&
          atomLength > 8 &&
          atomLength <= INT_MAX) {
        sh_audio->codecdata = (unsigned char*) malloc(atomLength-8);
        if (sh_audio->codecdata) {
          memcpy(sh_audio->codecdata, pos+8, atomLength-8);
          sh_audio->codecdata_len = atomLength-8;
        }
        break;
      }
      pos += atomLength;
    }
  } else {
    fprintf(stderr,
            "Unknown MPlayer format code for MIME type \"audio/%s\"\n",
            subsession->codecName());
  }
}
// For some codecs, MPlayer's decoding software can't (or refuses to :-)
// figure out the frame rate by itself, so (unless the user specifies
// it manually, using "-fps") we figure it out ourselves here, using the
// presentation timestamps in successive packets,
static void needVideoFrameRate(demuxer_t* demuxer,
                               MediaSubsession* subsession) {
  extern double force_fps; if (force_fps != 0.0) return; // user used "-fps"

  demux_stream_t* d_video = demuxer->video;
  sh_video_t* sh_video = (sh_video_t*)(d_video->sh);

  // If we already know the subsession's video frame rate, use it:
  int fps = (int)(subsession->videoFPS());
  if (fps != 0) {
    sh_video->fps = fps;
    sh_video->frametime = 1.0f/fps;
    return;
  }

  // Keep looking at incoming frames until we see two with different,
  // non-zero "pts" timestamps:
  unsigned char* packetData; unsigned packetDataLen;
  float lastPTS = 0.0, curPTS;
  unsigned const maxNumFramesToWaitFor = 300;
  int lastfps = 0;
  for (unsigned i = 0; i < maxNumFramesToWaitFor; ++i) {
    // awaitRTPPacket blocks until a packet arrives (it queues the
    // packets it consumes for later demuxing); bail out if the input
    // stream closed:
    if (!awaitRTPPacket(demuxer, d_video, packetData, packetDataLen, curPTS)) {
      break;
    }

    if (curPTS != lastPTS && lastPTS != 0.0) {
      // Use the difference between these two "pts"s to guess the frame rate.
      // (should really check that there were no missing frames inbetween)#####
      // Guess the frame rate as an integer.  If it's not, use "-fps" instead.
      fps = (int)(1/fabs(curPTS-lastPTS) + 0.5); // rounding
      // Remember the largest guess seen so far, and only accept a value
      // once it has been reproduced by a later packet pair:
      if (fps == lastfps) {
        fprintf(stderr, "demux_rtp: Guessed the video frame rate as %d frames-per-second.\n\t(If this is wrong, use the \"-fps <frame-rate>\" option instead.)\n", fps);
        sh_video->fps = fps;
        sh_video->frametime=1.0f/fps;
        return;
      }
      if (fps>lastfps) lastfps = fps;
    }
    lastPTS = curPTS;
  }
  fprintf(stderr, "demux_rtp: Failed to guess the video frame rate\n");
}
// Extract the codec fourcc from a QuickTime video sample-description
// atom carried in "qtState".  Returns False if the atom is absent or too
// short to contain the 4-byte type field at offset 4.
static Boolean
parseQTState_video(QuickTimeGenericRTPSource::QTState const& qtState,
                   unsigned& fourcc) {
  // qtState's "sdAtom" field is supposed to contain a QuickTime video
  // 'sample description' atom. This atom's name is the 'fourcc' that we want:
  char const* sdAtom = qtState.sdAtom;
  if (sdAtom == NULL || qtState.sdAtomSize < 2*4) return False;

  // memcpy instead of the old "*(unsigned*)&sdAtom[4]" read: sdAtom
  // carries no alignment guarantee and the cast also violated strict
  // aliasing (both UB).  The in-memory byte order of the fourcc is
  // preserved exactly as before.
  memcpy(&fourcc, &sdAtom[4], sizeof fourcc);
  return True;
}
// Extract the codec fourcc and channel count from a QuickTime audio
// sample-description atom carried in "qtState".  Returns False if the
// atom is absent or shorter than the 7 words we need to read.
static Boolean
parseQTState_audio(QuickTimeGenericRTPSource::QTState const& qtState,
                   unsigned& fourcc, unsigned& numChannels) {
  // qtState's "sdAtom" field is supposed to contain a QuickTime audio
  // 'sample description' atom. This atom's name is the 'fourcc' that we want.
  // Also, the top half of the 5th word following the atom name should
  // contain the number of channels ("numChannels") that we want:
  char const* sdAtom = qtState.sdAtom;
  if (sdAtom == NULL || qtState.sdAtomSize < 7*4) return False;

  // memcpy instead of the old "*(unsigned*)&sdAtom[4]" read: sdAtom
  // carries no alignment guarantee and the cast also violated strict
  // aliasing (both UB).  The in-memory byte order of the fourcc is
  // preserved exactly as before.
  memcpy(&fourcc, &sdAtom[4], sizeof fourcc);

  // Read the channel count as unsigned bytes; with a signed "char" the
  // old "word7Ptr[0]<<8" would sign-extend bytes >= 0x80.
  unsigned char const* word7Ptr = (unsigned char const*)&sdAtom[6*4];
  numChannels = (word7Ptr[0]<<8)|(word7Ptr[1]);
  return True;
}

View File

@ -1,54 +0,0 @@
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MPLAYER_DEMUX_RTP_INTERNAL_H
#define MPLAYER_DEMUX_RTP_INTERNAL_H

#include <stdlib.h>

// MPlayer's demuxer/FFmpeg headers are plain C; wrap them so this C++
// translation unit links against their unmangled symbols.
extern "C" {
#include "demuxer.h"
#include "config.h"
#include "libavcodec/avcodec.h"
}

#ifndef _LIVEMEDIA_HH
// MPlayer's headers define STREAM_SEEK, which collides with an
// identifier in the LIVE555 headers; drop the macro before including
// liveMedia.hh.
#undef STREAM_SEEK
#include <liveMedia.hh>
#endif

// Codec-specific initialization routines:
// Each fills in the dummy stream header created by the RTP demuxer and
// reports format peculiarities through "flags" (see RTPSTATE_* below).
void rtpCodecInitialize_video(demuxer_t* demuxer,
                              MediaSubsession* subsession, unsigned& flags);
void rtpCodecInitialize_audio(demuxer_t* demuxer,
                              MediaSubsession* subsession, unsigned& flags);

// Flags that may be set by the above routines:
#define RTPSTATE_IS_MPEG12_VIDEO 0x1 // is a MPEG-1 or 2 video stream
#define RTPSTATE_IS_MULTIPLEXED 0x2 // is a combined audio+video stream

// A routine to wait for the first packet of a RTP stream to arrive.
// (For some RTP payload formats, codecs cannot be fully initialized until
// we've started receiving data.)
Boolean awaitRTPPacket(demuxer_t* demuxer, demux_stream_t* ds,
                       unsigned char*& packetData, unsigned& packetDataLen,
                       float& pts);
  // "streamType": 0 => video; 1 => audio
  // This routine returns False if the input stream has closed

#endif /* MPLAYER_DEMUX_RTP_INTERNAL_H */

View File

@ -61,7 +61,6 @@ extern const demuxer_desc_t demuxer_desc_asf;
extern const demuxer_desc_t demuxer_desc_matroska;
extern const demuxer_desc_t demuxer_desc_gif;
extern const demuxer_desc_t demuxer_desc_rawdv;
extern const demuxer_desc_t demuxer_desc_rtp;
extern const demuxer_desc_t demuxer_desc_rtp_nemesi;
extern const demuxer_desc_t demuxer_desc_lavf;
extern const demuxer_desc_t demuxer_desc_lavf_preferred;
@ -87,9 +86,6 @@ const demuxer_desc_t *const demuxer_list[] = {
#ifdef CONFIG_GIF
&demuxer_desc_gif,
#endif
#ifdef CONFIG_LIVE555
&demuxer_desc_rtp,
#endif
#ifdef CONFIG_LIBNEMESI
&demuxer_desc_rtp_nemesi,
#endif

View File

@ -53,7 +53,6 @@ enum demuxer_type {
DEMUXER_TYPE_Y4M,
DEMUXER_TYPE_MF,
DEMUXER_TYPE_RAWAUDIO,
DEMUXER_TYPE_RTP,
DEMUXER_TYPE_RAWDV,
DEMUXER_TYPE_RAWVIDEO,
DEMUXER_TYPE_GIF,

View File

@ -37,10 +37,6 @@
/* biCompression constant */
#define BI_RGB 0L
#ifdef CONFIG_LIVE555
#include "demux_rtp.h"
#endif
int video_read_properties(sh_video_t *sh_video){
demux_stream_t *d_video=sh_video->ds;

View File

@ -441,10 +441,6 @@ int stream_enable_cache(stream_t *stream,int64_t size,int64_t min,int64_t seek_l
int res = -1;
cache_vars_t* s;
if (stream->flags & STREAM_NON_CACHEABLE) {
mp_msg(MSGT_CACHE,MSGL_STATUS,"\rThis stream is non-cacheable\n");
return 1;
}
if (size > SIZE_MAX) {
mp_msg(MSGT_CACHE, MSGL_FATAL, "Cache size larger than max. allocation size\n");
return -1;

View File

@ -72,8 +72,6 @@ extern const stream_info_t stream_info_pvr;
extern const stream_info_t stream_info_ftp;
extern const stream_info_t stream_info_vstream;
extern const stream_info_t stream_info_smb;
extern const stream_info_t stream_info_sdp;
extern const stream_info_t stream_info_rtsp_sip;
extern const stream_info_t stream_info_null;
extern const stream_info_t stream_info_mf;
@ -96,10 +94,6 @@ static const stream_info_t* const auto_open_streams[] = {
&stream_info_asf,
#ifdef CONFIG_LIBNEMESI
&stream_info_rtsp,
#endif
#ifdef CONFIG_LIVE555
&stream_info_sdp,
&stream_info_rtsp_sip,
#endif
&stream_info_udp,
&stream_info_http2,

View File

@ -70,10 +70,6 @@
#define MP_STREAM_SEEK_BW 2
#define MP_STREAM_SEEK_FW 4
#define MP_STREAM_SEEK (MP_STREAM_SEEK_BW|MP_STREAM_SEEK_FW)
/** This is a HACK for live555 that does not respect the
separation between stream an demuxer and thus is not
actually a stream cache can not be used */
#define STREAM_NON_CACHEABLE 8
//////////// Open return code
#define STREAM_REDIRECTED -2

View File

@ -1,132 +0,0 @@
/*
* This file is part of MPlayer.
*
* MPlayer is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* MPlayer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "stream.h"
#include "network.h"
#include "libmpdemux/demuxer.h"
extern int network_bandwidth;
// Seek callback installed on the streaming_ctrl.  RTSP streams are not
// seekable, so this always fails; the parameters are required by the
// callback signature and are intentionally unused.
static int _rtsp_streaming_seek(int fd, off_t pos, streaming_ctrl_t* streaming_ctrl) {
  return -1; // For now, we don't handle RTSP stream seeking
}
// Install the (non-functional) seek callback on the stream's
// streaming_ctrl.  Always succeeds; no network I/O happens here — the
// actual RTSP session is set up later by the RTP demuxer.
static int rtsp_streaming_start(stream_t* stream) {
  stream->streaming_ctrl->streaming_seek = _rtsp_streaming_seek;
  return 0;
}
// Stream-layer open() for "rtsp://" and "sip://" URLs via the LIVE555
// backend.  Records the (possibly proxied) URL on a fresh streaming_ctrl
// and tags the stream as a non-cacheable network stream handled by the
// RTP demuxer; the real session setup happens later in the demuxer.
static int open_live_rtsp_sip(stream_t *stream,int mode, void* opts, int* file_format) {
  URL_t *url;

  stream->streaming_ctrl = streaming_ctrl_new();
  if( stream->streaming_ctrl==NULL ) {
    return STREAM_ERROR;
  }
  stream->streaming_ctrl->bandwidth = network_bandwidth;
  url = url_new(stream->url);
  stream->streaming_ctrl->url = check4proxies(url);
  // NOTE(review): "url" is never freed; presumably check4proxies() can
  // return the same object (hence the disabled free below) — confirm
  // ownership semantics before re-enabling it.
  //url_free(url);

  mp_msg(MSGT_OPEN, MSGL_INFO, "STREAM_LIVE555, URL: %s\n", stream->url);

  if(rtsp_streaming_start(stream) < 0) {
    mp_msg(MSGT_NETWORK,MSGL_ERR,"rtsp_streaming_start failed\n");
    goto fail;
  }

  *file_format = DEMUXER_TYPE_RTP;
  stream->type = STREAMTYPE_STREAM;
  // live555 bypasses the stream/demuxer separation, so the stream cache
  // must not be layered on top (see STREAM_NON_CACHEABLE in stream.h):
  stream->flags = STREAM_NON_CACHEABLE;
  return STREAM_OK;

fail:
  streaming_ctrl_free( stream->streaming_ctrl );
  stream->streaming_ctrl = NULL;
  return STREAM_ERROR;
}
// Stream-layer open() for "sdp://" URLs: read the referenced SDP file
// into a NUL-terminated heap buffer and hand it to the RTP demuxer via
// stream->priv (which takes ownership).  Returns STREAM_OK on success,
// STREAM_ERROR on I/O or allocation failure, STREAM_UNSUPPORTED if the
// URL does not start with "sdp://".
static int open_live_sdp(stream_t *stream,int mode, void* opts, int* file_format) {
  int f;
  char *filename = stream->url;
  off_t len;
  char* sdpDescription;
  ssize_t numBytesRead;

  if(strncmp("sdp://",filename,6) == 0) {
    filename += 6;
    f = open(filename,O_RDONLY|O_BINARY);
    if(f < 0) {
      mp_tmsg(MSGT_OPEN,MSGL_ERR,"File not found: '%s'\n",filename);
      return STREAM_ERROR;
    }

    len=lseek(f,0,SEEK_END);
    lseek(f,0,SEEK_SET);
    // The old code leaked "f" on every path; close it on the error
    // branches and as soon as the contents have been read.
    if(len == -1 || len > SIZE_MAX - 1) {
      close(f);
      return STREAM_ERROR;
    }

    sdpDescription = malloc(len+1);
    if(sdpDescription == NULL) {
      close(f);
      return STREAM_ERROR;
    }
    numBytesRead = read(f, sdpDescription, len);
    close(f);
    if(numBytesRead != len) {
      free(sdpDescription);
      return STREAM_ERROR;
    }
    sdpDescription[len] = '\0'; // to be safe

    stream->priv = sdpDescription;  // demuxer takes ownership
    stream->type = STREAMTYPE_SDP;
    *file_format = DEMUXER_TYPE_RTP;
    return STREAM_OK;
  }
  return STREAM_UNSUPPORTED;
}
// Stream-type descriptor registered for "rtsp" and "sip" URL schemes
// (LIVE555 backend); selected via the protocol list below.
const stream_info_t stream_info_rtsp_sip = {
  "standard RTSP and SIP",
  "RTSP and SIP",
  "Ross Finlayson",
  "Uses LIVE555 Streaming Media library.",
  open_live_rtsp_sip,
  {"rtsp", "sip", NULL },  // URL schemes handled
  NULL,                    // no private options struct
  0 // Urls are an option string
};
// Stream-type descriptor registered for the "sdp" URL scheme: points a
// local SDP session-description file at the LIVE555 RTP demuxer.
const stream_info_t stream_info_sdp = {
  "SDP stream descriptor",
  "SDP",
  "Ross Finlayson",
  "Uses LIVE555 Streaming Media library.",
  open_live_sdp,
  {"sdp", NULL },  // URL schemes handled
  NULL,            // no private options struct
  0 // Urls are an option string
};