avfilter: add Affine Projection adaptive audio filter

Paul B Mahol 2023-04-30 17:06:00 +02:00
parent cc86343b96
commit f66536cc58
7 changed files with 620 additions and 1 deletions

Changelog

@@ -6,6 +6,7 @@ version <next>:
- EVC decoding using external library libxevd
- EVC encoding using external library libxeve
- QOA decoder and demuxer
- aap filter
version 6.1:
- libaribcaption decoder

doc/filters.texi

@@ -418,6 +418,63 @@ build.
Below is a description of the currently available audio filters.
@section aap
Apply the Affine Projection algorithm to the first audio stream using the second audio stream.
This adaptive filter is used to estimate an unknown audio signal from multiple input audio samples.
The affine projection algorithm trades off computational complexity against convergence speed.
A description of the accepted options follows.
@table @option
@item order
Set the filter order.
@item projection
Set the projection order.
@item mu
Set the filter mu.
@item delta
Set the coefficient used to initialize the internal covariance matrix.
@item out_mode
Set the filter output samples. It accepts the following values:
@table @option
@item i
Pass the 1st input.
@item d
Pass the 2nd input.
@item o
Pass the difference between the desired signal (2nd input) and the error signal estimate.
@item n
Pass the difference between the input signal (1st input) and the error signal estimate.
@item e
Pass the estimated error signal samples.
Default value is @var{o}.
@end table
@item precision
Set which precision to use when processing samples.
@table @option
@item auto
Automatically pick the internal sample format depending on the other filters.
@item float
Always use single-precision floating-point sample format.
@item double
Always use double-precision floating-point sample format.
@end table
@end table
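For example, the filter could be applied to two inputs like this, with filter order 32 and projection order 4 (the file names are purely illustrative):
@example
ffmpeg -i input.wav -i desired.wav -filter_complex "[0:a][1:a]aap=order=32:projection=4:mu=0.0001" output.wav
@end example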
@section acompressor
A compressor is mainly used to reduce the dynamic range of a signal.

libavfilter/Makefile

@@ -35,6 +35,7 @@ OBJS-$(CONFIG_DNN) += dnn_filter_common.o
include $(SRC_PATH)/libavfilter/dnn/Makefile
# audio filters
OBJS-$(CONFIG_AAP_FILTER) += af_aap.o
OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o
OBJS-$(CONFIG_ACOMPRESSOR_FILTER) += af_sidechaincompress.o
OBJS-$(CONFIG_ACONTRAST_FILTER) += af_acontrast.o

libavfilter/aap_template.c (new file, 227 lines)

@@ -0,0 +1,227 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
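/* This template is compiled twice from af_aap.c, once with DEPTH == 32 and
 * once with DEPTH == 64, producing float and double variants of every
 * function through the fn() name-mangling macros below. */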
#undef ZERO
#undef ONE
#undef ftype
#undef SAMPLE_FORMAT
#if DEPTH == 32
#define SAMPLE_FORMAT float
#define ftype float
#define ONE 1.f
#define ZERO 0.f
#else
#define SAMPLE_FORMAT double
#define ftype double
#define ONE 1.0
#define ZERO 0.0
#endif
#define fn3(a,b) a##_##b
#define fn2(a,b) fn3(a,b)
#define fn(a) fn2(a, SAMPLE_FORMAT)
#if DEPTH == 64
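/* Plain C dot product for the double-precision path; the float path below
 * uses the AVFloatDSPContext scalarproduct_float() helper instead. */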
static double scalarproduct_double(const double *v1, const double *v2, int len)
{
double p = 0.0;
for (int i = 0; i < len; i++)
p += v1[i] * v2[i];
return p;
}
#endif
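/* Compute one output sample of the adaptive FIR filter.  The delay line is
 * circular and the coefficients are stored twice back to back, so a single
 * contiguous copy starting at (order - offset) aligns the taps with the
 * current position of the delay buffer before the dot product. */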
static ftype fn(fir_sample)(AudioAPContext *s, ftype sample, ftype *delay,
ftype *coeffs, ftype *tmp, int *offset)
{
const int order = s->order;
ftype output;
delay[*offset] = sample;
memcpy(tmp, coeffs + order - *offset, order * sizeof(ftype));
#if DEPTH == 32
output = s->fdsp->scalarproduct_float(delay, tmp, s->kernel_size);
#else
output = scalarproduct_double(delay, tmp, s->kernel_size);
#endif
if (--(*offset) < 0)
*offset = order - 1;
return output;
}
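/* In-place LU decomposition with partial pivoting.  On return MA holds the
 * combined L (unit lower triangular) and U factors, P[0..N-1] records the
 * row permutation and P[N] counts the number of row swaps.  Returns 0 when
 * a pivot falls below tol, i.e. the matrix is considered singular. */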
static int fn(lup_decompose)(ftype **MA, const int N, const ftype tol, int *P)
{
for (int i = 0; i <= N; i++)
P[i] = i;
for (int i = 0; i < N; i++) {
ftype maxA = ZERO;
int imax = i;
for (int k = i; k < N; k++) {
ftype absA = fabs(MA[k][i]);
if (absA > maxA) {
maxA = absA;
imax = k;
}
}
if (maxA < tol)
return 0;
if (imax != i) {
FFSWAP(int, P[i], P[imax]);
FFSWAP(ftype *, MA[i], MA[imax]);
P[N]++;
}
for (int j = i + 1; j < N; j++) {
MA[j][i] /= MA[i][i];
for (int k = i + 1; k < N; k++)
MA[j][k] -= MA[j][i] * MA[i][k];
}
}
return 1;
}
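/* Invert an LUP-factored matrix column by column using forward and back
 * substitution, storing the result in IA. */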
static void fn(lup_invert)(ftype *const *MA, const int *P, const int N, ftype **IA)
{
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
IA[i][j] = P[i] == j ? ONE : ZERO;
for (int k = 0; k < i; k++)
IA[i][j] -= MA[i][k] * IA[k][j];
}
for (int i = N - 1; i >= 0; i--) {
for (int k = i + 1; k < N; k++)
IA[i][j] -= MA[i][k] * IA[k][j];
IA[i][j] /= MA[i][i];
}
}
}
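/* One affine projection update per input sample:
 *
 *   R = X*X^T + delta*I    (projection x projection covariance estimate)
 *   w = R^-1 * e           (solved through the LUP inverse above)
 *   coeffs += mu * X^T*w
 *
 * where the rows of X are the last `projection' shifted input vectors of
 * length `order' and e is the matching history of error samples. */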
static ftype fn(process_sample)(AudioAPContext *s, ftype input, ftype desired, int ch)
{
ftype *dcoeffs = (ftype *)s->dcoeffs->extended_data[ch];
ftype *coeffs = (ftype *)s->coeffs->extended_data[ch];
ftype *delay = (ftype *)s->delay->extended_data[ch];
ftype **itmpmp = (ftype **)&s->itmpmp[s->projection * ch];
ftype **tmpmp = (ftype **)&s->tmpmp[s->projection * ch];
ftype *tmpm = (ftype *)s->tmpm->extended_data[ch];
ftype *tmp = (ftype *)s->tmp->extended_data[ch];
ftype *e = (ftype *)s->e->extended_data[ch];
ftype *x = (ftype *)s->x->extended_data[ch];
ftype *w = (ftype *)s->w->extended_data[ch];
int *p = (int *)s->p->extended_data[ch];
int *offset = (int *)s->offset->extended_data[ch];
const int projection = s->projection;
const ftype delta = s->delta;
const int order = s->order;
const int length = projection + order;
const ftype mu = s->mu;
const ftype tol = 0.00001f;
ftype output;
x[offset[2] + length] = x[offset[2]] = input;
delay[offset[0] + order] = input;
output = fn(fir_sample)(s, input, delay, coeffs, tmp, offset);
e[offset[1]] = e[offset[1] + projection] = desired - output;
for (int i = 0; i < projection; i++) {
const int iprojection = i * projection;
for (int j = i; j < projection; j++) {
ftype sum = ZERO;
for (int k = 0; k < order; k++)
sum += x[offset[2] + i + k] * x[offset[2] + j + k];
tmpm[iprojection + j] = sum;
if (i != j)
tmpm[j * projection + i] = sum;
}
tmpm[iprojection + i] += delta;
}
fn(lup_decompose)(tmpmp, projection, tol, p);
fn(lup_invert)(tmpmp, p, projection, itmpmp);
for (int i = 0; i < projection; i++) {
ftype sum = ZERO;
for (int j = 0; j < projection; j++)
sum += itmpmp[i][j] * e[j + offset[1]];
w[i] = sum;
}
for (int i = 0; i < order; i++) {
ftype sum = ZERO;
for (int j = 0; j < projection; j++)
sum += x[offset[2] + i + j] * w[j];
dcoeffs[i] = sum;
}
for (int i = 0; i < order; i++)
coeffs[i] = coeffs[i + order] = coeffs[i] + mu * dcoeffs[i];
if (--offset[1] < 0)
offset[1] = projection - 1;
if (--offset[2] < 0)
offset[2] = length - 1;
switch (s->output_mode) {
case IN_MODE: output = input; break;
case DESIRED_MODE: output = desired; break;
case OUT_MODE: output = desired - output; break;
case NOISE_MODE: output = input - output; break;
case ERROR_MODE: break;
}
return output;
}
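/* Worker for ff_filter_execute(): each job processes a contiguous range of
 * channels.  When the filter is disabled the input is copied through
 * unchanged while the adaptive state keeps updating. */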
static int fn(filter_channels)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
AudioAPContext *s = ctx->priv;
AVFrame *out = arg;
const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;
for (int c = start; c < end; c++) {
const ftype *input = (const ftype *)s->frame[0]->extended_data[c];
const ftype *desired = (const ftype *)s->frame[1]->extended_data[c];
ftype *output = (ftype *)out->extended_data[c];
for (int n = 0; n < out->nb_samples; n++) {
output[n] = fn(process_sample)(s, input[n], desired[n], c);
if (ctx->is_disabled)
output[n] = input[n];
}
}
return 0;
}

libavfilter/af_aap.c (new file, 332 lines)

@@ -0,0 +1,332 @@
/*
* Copyright (c) 2023 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
enum OutModes {
IN_MODE,
DESIRED_MODE,
OUT_MODE,
NOISE_MODE,
ERROR_MODE,
NB_OMODES
};
typedef struct AudioAPContext {
const AVClass *class;
int order;
int projection;
float mu;
float delta;
int output_mode;
int precision;
int kernel_size;
AVFrame *offset;
AVFrame *delay;
AVFrame *coeffs;
AVFrame *e;
AVFrame *p;
AVFrame *x;
AVFrame *w;
AVFrame *dcoeffs;
AVFrame *tmp;
AVFrame *tmpm;
AVFrame *itmpm;
void **tmpmp;
void **itmpmp;
AVFrame *frame[2];
int (*filter_channels)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
AVFloatDSPContext *fdsp;
} AudioAPContext;
#define OFFSET(x) offsetof(AudioAPContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption aap_options[] = {
{ "order", "set the filter order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=16}, 1, INT16_MAX, A },
{ "projection", "set the filter projection", OFFSET(projection), AV_OPT_TYPE_INT, {.i64=2}, 1, 256, A },
{ "mu", "set the filter mu", OFFSET(mu), AV_OPT_TYPE_FLOAT, {.dbl=0.0001},0,1, AT },
{ "delta", "set the filter delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0.001},0, 1, AT },
{ "out_mode", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64=OUT_MODE}, 0, NB_OMODES-1, AT, "mode" },
{ "i", "input", 0, AV_OPT_TYPE_CONST, {.i64=IN_MODE}, 0, 0, AT, "mode" },
{ "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, AT, "mode" },
{ "o", "output", 0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE}, 0, 0, AT, "mode" },
{ "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE}, 0, 0, AT, "mode" },
{ "e", "error", 0, AV_OPT_TYPE_CONST, {.i64=ERROR_MODE}, 0, 0, AT, "mode" },
{ "precision", "set processing precision", OFFSET(precision), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, A, "precision" },
{ "auto", "set auto processing precision", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "precision" },
{ "float", "set single-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "precision" },
{ "double","set double-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, A, "precision" },
{ NULL }
};
AVFILTER_DEFINE_CLASS(aap);
static int query_formats(AVFilterContext *ctx)
{
AudioAPContext *s = ctx->priv;
static const enum AVSampleFormat sample_fmts[3][3] = {
{ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
{ AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
{ AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
};
int ret;
if ((ret = ff_set_common_all_channel_counts(ctx)) < 0)
return ret;
if ((ret = ff_set_common_formats_from_list(ctx, sample_fmts[s->precision])) < 0)
return ret;
return ff_set_common_all_samplerates(ctx);
}
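/* Consume the same number of samples from both inputs, run the adaptive
 * filter once a frame is available on each, and forward status changes and
 * frame requests between the output and the two inputs. */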
static int activate(AVFilterContext *ctx)
{
AudioAPContext *s = ctx->priv;
int i, ret, status;
int nb_samples;
int64_t pts;
FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
ff_inlink_queued_samples(ctx->inputs[1]));
for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
if (s->frame[i])
continue;
if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
if (ret < 0)
return ret;
}
}
if (s->frame[0] && s->frame[1]) {
AVFrame *out;
out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
if (!out) {
av_frame_free(&s->frame[0]);
av_frame_free(&s->frame[1]);
return AVERROR(ENOMEM);
}
ff_filter_execute(ctx, s->filter_channels, out, NULL,
FFMIN(ctx->outputs[0]->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
out->pts = s->frame[0]->pts;
out->duration = s->frame[0]->duration;
av_frame_free(&s->frame[0]);
av_frame_free(&s->frame[1]);
ret = ff_filter_frame(ctx->outputs[0], out);
if (ret < 0)
return ret;
}
if (!nb_samples) {
for (i = 0; i < 2; i++) {
if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
ff_outlink_set_status(ctx->outputs[0], status, pts);
return 0;
}
}
}
if (ff_outlink_frame_wanted(ctx->outputs[0])) {
for (i = 0; i < 2; i++) {
if (s->frame[i] || ff_inlink_queued_samples(ctx->inputs[i]) > 0)
continue;
ff_inlink_request_frame(ctx->inputs[i]);
return 0;
}
}
return 0;
}
#define DEPTH 32
#include "aap_template.c"
#undef DEPTH
#define DEPTH 64
#include "aap_template.c"
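/* Allocate the per-channel state (delay line, duplicated coefficient buffers,
 * error/input histories and the projection x projection work matrices, plus
 * the per-channel row pointer tables used by the LUP routines) and pick the
 * float or double processing variant based on the negotiated sample format. */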
static int config_output(AVFilterLink *outlink)
{
const int channels = outlink->ch_layout.nb_channels;
AVFilterContext *ctx = outlink->src;
AudioAPContext *s = ctx->priv;
s->kernel_size = FFALIGN(s->order, 16);
if (!s->offset)
s->offset = ff_get_audio_buffer(outlink, 3);
if (!s->delay)
s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
if (!s->dcoeffs)
s->dcoeffs = ff_get_audio_buffer(outlink, s->kernel_size);
if (!s->coeffs)
s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
if (!s->e)
s->e = ff_get_audio_buffer(outlink, 2 * s->projection);
if (!s->p)
s->p = ff_get_audio_buffer(outlink, s->projection + 1);
if (!s->x)
s->x = ff_get_audio_buffer(outlink, 2 * (s->projection + s->order));
if (!s->w)
s->w = ff_get_audio_buffer(outlink, s->projection);
if (!s->tmp)
s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);
if (!s->tmpm)
s->tmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
if (!s->itmpm)
s->itmpm = ff_get_audio_buffer(outlink, s->projection * s->projection);
if (!s->tmpmp)
s->tmpmp = av_calloc(s->projection * channels, sizeof(*s->tmpmp));
if (!s->itmpmp)
s->itmpmp = av_calloc(s->projection * channels, sizeof(*s->itmpmp));
if (!s->offset || !s->delay || !s->dcoeffs || !s->coeffs || !s->tmpmp || !s->itmpmp ||
!s->e || !s->p || !s->x || !s->w || !s->tmp || !s->tmpm || !s->itmpm)
return AVERROR(ENOMEM);
switch (outlink->format) {
case AV_SAMPLE_FMT_DBLP:
for (int ch = 0; ch < channels; ch++) {
double *itmpm = (double *)s->itmpm->extended_data[ch];
double *tmpm = (double *)s->tmpm->extended_data[ch];
double **itmpmp = (double **)&s->itmpmp[s->projection * ch];
double **tmpmp = (double **)&s->tmpmp[s->projection * ch];
for (int i = 0; i < s->projection; i++) {
itmpmp[i] = &itmpm[i * s->projection];
tmpmp[i] = &tmpm[i * s->projection];
}
}
s->filter_channels = filter_channels_double;
break;
case AV_SAMPLE_FMT_FLTP:
for (int ch = 0; ch < channels; ch++) {
float *itmpm = (float *)s->itmpm->extended_data[ch];
float *tmpm = (float *)s->tmpm->extended_data[ch];
float **itmpmp = (float **)&s->itmpmp[s->projection * ch];
float **tmpmp = (float **)&s->tmpmp[s->projection * ch];
for (int i = 0; i < s->projection; i++) {
itmpmp[i] = &itmpm[i * s->projection];
tmpmp[i] = &tmpm[i * s->projection];
}
}
s->filter_channels = filter_channels_float;
break;
}
return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
AudioAPContext *s = ctx->priv;
s->fdsp = avpriv_float_dsp_alloc(0);
if (!s->fdsp)
return AVERROR(ENOMEM);
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
AudioAPContext *s = ctx->priv;
av_freep(&s->fdsp);
av_frame_free(&s->offset);
av_frame_free(&s->delay);
av_frame_free(&s->dcoeffs);
av_frame_free(&s->coeffs);
av_frame_free(&s->e);
av_frame_free(&s->p);
av_frame_free(&s->w);
av_frame_free(&s->x);
av_frame_free(&s->tmp);
av_frame_free(&s->tmpm);
av_frame_free(&s->itmpm);
av_freep(&s->tmpmp);
av_freep(&s->itmpmp);
}
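/* The filter takes two audio inputs, the signal to be filtered and the
 * desired (reference) signal, and produces a single output. */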
static const AVFilterPad inputs[] = {
{
.name = "input",
.type = AVMEDIA_TYPE_AUDIO,
},
{
.name = "desired",
.type = AVMEDIA_TYPE_AUDIO,
},
};
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
},
};
const AVFilter ff_af_aap = {
.name = "aap",
.description = NULL_IF_CONFIG_SMALL("Apply Affine Projection algorithm to first audio stream."),
.priv_size = sizeof(AudioAPContext),
.priv_class = &aap_class,
.init = init,
.uninit = uninit,
.activate = activate,
FILTER_INPUTS(inputs),
FILTER_OUTPUTS(outputs),
FILTER_QUERY_FUNC(query_formats),
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
AVFILTER_FLAG_SLICE_THREADS,
.process_command = ff_filter_process_command,
};

libavfilter/allfilters.c

@@ -21,6 +21,7 @@
#include "avfilter.h"
extern const AVFilter ff_af_aap;
extern const AVFilter ff_af_abench;
extern const AVFilter ff_af_acompressor;
extern const AVFilter ff_af_acontrast;

libavfilter/version.h

@@ -31,7 +31,7 @@
#include "version_major.h"
-#define LIBAVFILTER_VERSION_MINOR 13
+#define LIBAVFILTER_VERSION_MINOR 14
#define LIBAVFILTER_VERSION_MICRO 100