fix various typos

Signed-off-by: Lou Logan <lou@lrcd.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Lou Logan 2014-06-02 18:46:04 -08:00
parent 137c5e115b
commit 88f2586adb
30 changed files with 40 additions and 40 deletions

@@ -9,10 +9,10 @@ version <next>:
- replaygain data export
- VP7 video decoder
- Alias PIX image encoder and decoder
- Improvments to the BRender PIX image decoder
- Improvments to the XBM decoder
- Improvements to the BRender PIX image decoder
- Improvements to the XBM decoder
- QTKit input device
- improvments to OpenEXR image decoder
- improvements to OpenEXR image decoder
- support decoding 16-bit RLE SGI images
- GDI screen grabbing for Windows
- alternative rendition support for HTTP Live Streaming

@@ -886,7 +886,7 @@ Set frame skip factor.
Set frame skip exponent.
Negative values behave identical to the corresponding positive ones, except
that the score is normalized.
Positive values exist primarly for compatibility reasons and are not so useful.
Positive values exist primarily for compatibility reasons and are not so useful.
@item skipcmp @var{integer} (@emph{encoding,video})
Set frame skip compare function.

@@ -80,7 +80,7 @@ thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.
The quality of this method is comparable to the two loop searching method
descibed below, but somewhat a little better and slower.
described below, but somewhat a little better and slower.
@item anmr
Average noise to mask ratio (ANMR) trellis-based solution.

@@ -640,7 +640,7 @@ int main(int argc, char **argv)
"This program generates a synthetic stream and encodes it to a file\n"
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
"The encoded stream is then decoded and written to a raw data output.\n"
"output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
argv[0]);
return 1;
}

@@ -392,7 +392,7 @@ VOB and a few other formats do not have a global header that describes
everything present in the file. Instead, applications are supposed to scan
the file to see what it contains. Since VOB files are frequently large, only
the beginning is scanned. If the subtitles happen only later in the file,
they will not be initally detected.
they will not be initially detected.
Some applications, including the @code{ffmpeg} command-line tool, can only
work with streams that were detected during the initial scan; streams that
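
The FAQ passage above explains why streams that only start later in a VOB can be missed by the initial scan. As an illustration only (option values and file names are arbitrary, not taken from this commit), the existing probesize and analyzeduration input options can widen that scan so such streams are detected up front:

  # Sketch: enlarge the probing window so a subtitle stream that only
  # appears later in the VOB is found during the initial scan.
  ffmpeg -probesize 50000000 -analyzeduration 100000000 -i input.vob -map 0 -c copy out.mkv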

@@ -714,7 +714,7 @@ Change the syncpoint usage in nut:
@table @option
@item @var{default} use the normal low-overhead seeking aids.
@item @var{none} do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
Use of this option is not recommanded, as the resulting files are very damage
Use of this option is not recommended, as the resulting files are very damage
sensitive and seeking is not possible. Also in general the overhead from
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
all growing data tables, allowing to mux endless streams with limited memory
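
The muxer documentation above describes the nut syncpoints option and mentions -write_index 0. A minimal sketch of the trade-off it describes (file names are illustrative, not part of this commit):

  # Drop syncpoints and the index: slightly lower overhead, but the
  # output is non-seekable and damage sensitive, as the text warns.
  ffmpeg -i input.mkv -c copy -syncpoints none -write_index 0 out.nut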

@@ -270,7 +270,7 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
if (!ist2->sub2video.frame)
continue;
/* subtitles seem to be usually muxed ahead of other streams;
if not, substracting a larger time here is necessary */
if not, subtracting a larger time here is necessary */
pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
/* do not send the heartbeat frame if the subtitle is already ahead */
if (pts2 <= ist2->sub2video.last_pts)

@@ -1946,7 +1946,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
avpriv_request_sample(ac->avctx, "SSR");
return AVERROR_PATCHWELCOME;
}
// I see no textual basis in the spec for this occuring after SSR gain
// I see no textual basis in the spec for this occurring after SSR gain
// control, but this is what both reference and real implmentations do
if (tns->present && er_syntax)
if (decode_tns(ac, tns, gb, ics) < 0)

@@ -575,7 +575,7 @@ typedef struct AVCodecDescriptor {
/**
* MIME type(s) associated with the codec.
* May be NULL; if not, a NULL-terminated array of MIME types.
* The first item is always non-NULL and is the prefered MIME type.
* The first item is always non-NULL and is the preferred MIME type.
*/
const char *const *mime_types;
} AVCodecDescriptor;
@@ -4957,7 +4957,7 @@ AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
* @return >= 0 in case of success, or a negative error code in case of failure
*
* If the return value is positive, an output buffer is allocated and
* is availble in *poutbuf, and is distinct from the input buffer.
* is available in *poutbuf, and is distinct from the input buffer.
*
* If the return value is 0, the output buffer is not allocated and
* should be considered identical to the input buffer, or in case

@@ -2423,7 +2423,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
/* make sure that we have managed to get equivelant dts/avcodec channel
/* make sure that we have managed to get equivalent dts/avcodec channel
* masks in some sense -- unfortunately some channels could overlap */
if (av_popcount(channel_mask) != av_popcount(channel_layout)) {
av_log(avctx, AV_LOG_DEBUG,

@@ -868,7 +868,7 @@ static void put_subframe(DCAContext *c, int subframe)
for (band = 0; band < DCA_SUBBANDS; band++)
put_bits(&c->pb, 1, 0);
/* Prediction VQ addres: not transmitted */
/* Prediction VQ address: not transmitted */
/* Bit allocation index */
for (ch = 0; ch < c->fullband_channels; ch++)
for (band = 0; band < DCA_SUBBANDS; band++)

@@ -199,7 +199,7 @@ typedef struct DiracContext {
uint8_t *edge_emu_buffer[4];
uint8_t *edge_emu_buffer_base;
uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
uint8_t *mcscratch;
int buffer_stride;
@@ -694,7 +694,7 @@ static void lowdelay_subband(DiracContext *s, GetBitContext *gb, int quant,
IDWTELEM *buf1 = b1->ibuf + top * b1->stride;
IDWTELEM *buf2 = b2 ? b2->ibuf + top * b2->stride : NULL;
int x, y;
/* we have to constantly check for overread since the spec explictly
/* we have to constantly check for overread since the spec explicitly
requires this, with the meaning that all remaining coeffs are set to 0 */
if (get_bits_count(gb) >= bits_end)
return;

@@ -335,7 +335,7 @@ static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
}
if (ctx->frame_type == FRAMETYPE_INTER_SCAL && !ctx->is_scalable) {
av_log(avctx, AV_LOG_ERROR, "Scalable inter frame in non scaleable stream\n");
av_log(avctx, AV_LOG_ERROR, "Scalable inter frame in non scalable stream\n");
ctx->frame_type = FRAMETYPE_INTER;
return AVERROR_INVALIDDATA;
}

@@ -249,7 +249,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
else
reslevel->nbands = 3;
/* Number of precincts wich span the tile for resolution level reslevelno
/* Number of precincts which span the tile for resolution level reslevelno
* see B.6 in ISO/IEC 15444-1:2002 eq. B-16
* num_precincts_x = |- trx_1 / 2 ^ log2_prec_width) -| - (trx_0 / 2 ^ log2_prec_width)
* num_precincts_y = |- try_1 / 2 ^ log2_prec_width) -| - (try_0 / 2 ^ log2_prec_width)

@@ -66,7 +66,7 @@
/* FF_GL_RED_COMPONENT is used for plannar pixel types.
* Only red component is sampled in shaders.
* On some platforms GL_RED is not availabe and GL_LUMINANCE have to be used,
* On some platforms GL_RED is not available and GL_LUMINANCE have to be used,
* but since OpenGL 3.0 GL_LUMINANCE is deprecated.
* GL_RED produces RGBA = value, 0, 0, 1.
* GL_LUMINANCE produces RGBA = value, value, value, 1.
@@ -583,7 +583,7 @@ static void opengl_make_ortho(float matrix[16], float left, float right,
static av_cold int opengl_read_limits(OpenGLContext *opengl)
{
static const struct{
const char *extention;
const char *extension;
int major;
int minor;
} required_extensions[] = {
@@ -603,12 +603,12 @@ static av_cold int opengl_read_limits(OpenGLContext *opengl)
av_log(opengl, AV_LOG_DEBUG, "OpenGL version: %s\n", version);
sscanf(version, "%d.%d", &major, &minor);
for (i = 0; required_extensions[i].extention; i++) {
for (i = 0; required_extensions[i].extension; i++) {
if (major < required_extensions[i].major &&
(major == required_extensions[i].major && minor < required_extensions[i].minor) &&
!strstr(extensions, required_extensions[i].extention)) {
!strstr(extensions, required_extensions[i].extension)) {
av_log(opengl, AV_LOG_ERROR, "Required extension %s is not supported.\n",
required_extensions[i].extention);
required_extensions[i].extension);
av_log(opengl, AV_LOG_DEBUG, "Supported extensions are: %s\n", extensions);
return AVERROR(ENOSYS);
}

@@ -138,7 +138,7 @@ int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
* Add a frame to the buffer source.
*
* By default, if the frame is reference-counted, this function will take
* ownership of the reference(s) and reset the frame. This can be controled
* ownership of the reference(s) and reset the frame. This can be controlled
* using the flags.
*
* If this function returns an error, the input frame is not touched.

@@ -103,7 +103,7 @@ typedef struct vf_seteq_s
#define VFCTRL_CHANGE_RECTANGLE 9 /* Change the rectangle boundaries */
#define VFCTRL_FLIP_PAGE 10 /* Tell the vo to flip pages */
#define VFCTRL_DUPLICATE_FRAME 11 /* For encoding - encode zero-change frame */
#define VFCTRL_SKIP_NEXT_FRAME 12 /* For encoding - drop the next frame that passes thru */
#define VFCTRL_SKIP_NEXT_FRAME 12 /* For encoding - drop the next frame that passes through */
#define VFCTRL_FLUSH_FRAMES 13 /* For encoding - flush delayed frames */
#define VFCTRL_SCREENSHOT 14 /* Make a screenshot */
#define VFCTRL_INIT_EOSD 15 /* Select EOSD renderer */

@@ -24,7 +24,7 @@
* "Aria Nosratinia Embedded Post-Processing for
* Enhancement of Compressed Images (1999)"
* (http://citeseer.nj.nec.com/nosratinia99embedded.html)
* Futher, with splitting (i)dct into hor/ver passes, one of them can be
* Further, with splitting (i)dct into hor/ver passes, one of them can be
* performed once per block, not pixel. This allows for much better speed.
*/

@@ -210,7 +210,7 @@ int ff_mp_msg_test(int mod, int lev){
void ff_init_avcodec(void)
{
//we maybe should init but its kinda 1. unneeded 2. a bit inpolite from here
//we maybe should init but its kinda 1. unneeded 2. a bit impolite from here
}
//Exact copy of vf.c

@@ -201,7 +201,7 @@ static void blur(uint8_t *dst, const int dst_linesize,
if (diff > 2 * threshold)
dst[x + y * dst_linesize] = orig;
else if (diff > threshold)
/* add 'diff' and substract 'threshold' from 'filtered' */
/* add 'diff' and subtract 'threshold' from 'filtered' */
dst[x + y * dst_linesize] = orig - threshold;
} else {
if (-diff > 2 * threshold)
@@ -223,13 +223,13 @@ static void blur(uint8_t *dst, const int dst_linesize,
if (diff <= -threshold)
dst[x + y * dst_linesize] = orig;
else if (diff <= -2 * threshold)
/* substract 'diff' and 'threshold' from 'orig' */
/* subtract 'diff' and 'threshold' from 'orig' */
dst[x + y * dst_linesize] = filtered - threshold;
} else {
if (diff >= threshold)
dst[x + y * dst_linesize] = orig;
else if (diff >= 2 * threshold)
/* add 'threshold' and substract 'diff' from 'orig' */
/* add 'threshold' and subtract 'diff' from 'orig' */
dst[x + y * dst_linesize] = filtered + threshold;
}
}

@@ -93,7 +93,7 @@ int ffio_set_buf_size(AVIOContext *s, int buf_size);
*
* Will ensure that when reading sequentially up to buf_size, seeking
* within the current pos and pos+buf_size is possible.
* Once the stream position moves outside this window this gurantee is lost.
* Once the stream position moves outside this window this guarantee is lost.
*/
int ffio_ensure_seekback(AVIOContext *s, int buf_size);

@@ -200,7 +200,7 @@ static int read_header(AVFormatContext *s)
goto fail;
}
if (b->adpc) {
av_log(s, AV_LOG_WARNING, "skipping additonal ADPC chunk\n");
av_log(s, AV_LOG_WARNING, "skipping additional ADPC chunk\n");
goto skip;
} else {
b->adpc = av_mallocz(asize);

@@ -510,7 +510,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
if (ffm_read_data(s, ffm->header+16, 4, 1) != 4)
return -1;
ffm->read_state = READ_DATA;
/* fall thru */
/* fall through */
case READ_DATA:
size = AV_RB24(ffm->header + 2);
if ((ret = ffm_is_avail_data(s, size)) < 0)

@@ -799,7 +799,7 @@ static void hvcc_init(HEVCDecoderConfigurationRecord *hvcc)
/*
* Initialize this field with an invalid value which can be used to detect
* whether we didn't see any VUI (in wich case it should be reset to zero).
* whether we didn't see any VUI (in which case it should be reset to zero).
*/
hvcc->min_spatial_segmentation_idc = MAX_SPATIAL_SEGMENTATION + 1;
}

@@ -464,7 +464,7 @@ static int mpegps_read_packet(AVFormatContext *s,
MpegDemuxContext *m = s->priv_data;
AVStream *st;
int len, startcode, i, es_type, ret;
int lpcm_header_len = -1; //Init to supress warning
int lpcm_header_len = -1; //Init to suppress warning
int request_probe= 0;
enum AVCodecID codec_id = AV_CODEC_ID_NONE;
enum AVMediaType type;

@@ -356,7 +356,7 @@ int main(void)
av_bprint_init(&b, 0, 1);
bprint_pascal(&b, 25);
printf("Long text in automatic buffer: %u/%u\n", (unsigned)strlen(b.str)/8*8, b.len);
/* Note that the size of the automatic buffer is arch-dependant. */
/* Note that the size of the automatic buffer is arch-dependent. */
av_bprint_init(&b, 0, 0);
bprint_pascal(&b, 25);

@@ -146,7 +146,7 @@ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int
/**
* Add a value to a timestamp.
*
* This function gurantees that when the same value is repeatly added that
* This function guarantees that when the same value is repeatly added that
* no accumulation of rounding errors occurs.
*
* @param ts Input timestamp

@@ -257,7 +257,7 @@ enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
*
* See av_get_chroma_sub_sample() for a function that asserts a
* valid pixel format instead of returning an error code.
* Its recommanded that you use avcodec_get_chroma_sub_sample unless
* Its recommended that you use avcodec_get_chroma_sub_sample unless
* you do check the return code!
*
* @param[in] pix_fmt the pixel format

@@ -164,7 +164,7 @@ static void audiogen(void *data, enum AVSampleFormat sample_fmt,
a += M_PI * 1000.0 * 2.0 / sample_rate;
}
/* 1 second of varing frequency between 100 and 10000 Hz */
/* 1 second of varying frequency between 100 and 10000 Hz */
a = 0;
for (i = 0; i < 1 * sample_rate && k < nb_samples; i++, k++) {
v = sin(a) * 0.30;

@ -67,7 +67,7 @@ $EGREP $OPT '^\+ *(const *|)static' $*| $EGREP --color=always '[^=]= *(0|NULL)[^
cat $TMP
hiegrep '# *ifdef * (HAVE|CONFIG)_' 'ifdefs that should be #if' $*
hiegrep '\b(awnser|cant|dont|wont|doesnt|usefull|successfull|occured|teh|alot|wether|skiped|skiping|heigth|informations|colums|loosy|loosing|ouput|seperate|preceed|upto|paket|posible|unkown|inpossible|dimention|acheive|funtions|overriden|outputing|seperation|initalize|compatibilty|bistream|knwon|unknwon)\b' 'common typos' $*
hiegrep '\b(awnser|cant|dont|wont|doesnt|usefull|successfull|occured|teh|alot|wether|skiped|skiping|heigth|informations|colums|loosy|loosing|ouput|seperate|preceed|upto|paket|posible|unkown|inpossible|dimention|acheive|funtions|overriden|outputing|seperation|initalize|compatibilty|bistream|knwon|unknwon|choosen|additonal|gurantee|availble|wich)\b' 'common typos' $*
hiegrep 'av_log\( *NULL' 'Missing context in av_log' $*
hiegrep '[^sn]printf' 'Please use av_log' $*
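
The final hunk adds the words corrected by this commit (choosen, additonal, gurantee, availble, wich) to the common-typos pattern in tools/patcheck, which greps the added lines of a patch. A sketch of how such a check might be run locally (the patch file name is illustrative):

  # Generate a patch for the latest commit and let patcheck scan its
  # added lines for the listed misspellings and other style issues.
  git format-patch -1 --stdout > typo-fixes.patch
  tools/patcheck typo-fixes.patch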