GIF encoding now handled by ffmpeg, new RIFE and RIFE-NCNN pkgs

This commit is contained in:
N00MKRAD 2020-12-08 14:43:03 +01:00
parent e807f174c0
commit e77933b971
25 changed files with 1208 additions and 1014 deletions

3
.gitignore vendored
View File

@@ -31,6 +31,9 @@ bld/
[Ll]og/
[Ll]ogs/
# NMKD Python Redist Pkg
[Pp]y*/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot

View File

@@ -155,7 +155,7 @@ namespace Flowframes
DeleteSource(inputPath);
}
public static async void FramesToApng (string inputDir, bool opti, int fps, string prefix, bool delSrc)
public static async void FramesToApng (string inputDir, bool opti, int fps, string prefix, bool delSrc = false)
{
int nums = IOUtils.GetFilenameCounterLength(Directory.GetFiles(inputDir, "*.png")[0], prefix);
string filter = "";
@@ -166,18 +166,26 @@ namespace Flowframes
DeleteSource(inputDir);
}
public static async void FramesToGif (string inputDir, bool opti, int fps, string prefix, bool delSrc)
public static async void FramesToGif (string inputDir, bool palette, int fps, string prefix, bool delSrc = false)
{
int nums = IOUtils.GetFilenameCounterLength(Directory.GetFiles(inputDir, "*.png")[0], prefix);
string filter = "";
if (opti) filter = "-vf \"split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\"";
string filter = palette ? "-vf \"split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\"" : "";
string args = "-framerate " + fps + " -i \"" + inputDir + "\\" + prefix + "%0" + nums + "d.png\" -f gif " + filter + " \"" + inputDir + ".gif\"";
await AvProcess.RunFfmpeg(args, AvProcess.LogMode.OnlyLastLine);
if (delSrc)
DeleteSource(inputDir);
}
public static async Task LoopVideo (string inputFile, int times, bool delSrc)
// Encodes an animated GIF with ffmpeg from a frame-list file (the "vfr-{factor}x.ini"
// concat script produced elsewhere in this commit), writing the result to outPath.
// Unlike the fixed-rate FramesToGif above, this path carries per-frame timing via
// ffmpeg's concat demuxer. Does not delete the source frames.
public static async Task FramesToGifVfr(string framesFile, string outPath, bool palette)
{
Logger.Log($"Encoding GIF...");
// Only the list file's name is passed to ffmpeg; RunFfmpeg below is given the
// list file's parent directory as the working directory, so relative frame
// paths inside the concat script resolve correctly.
string vfrFilename = Path.GetFileName(framesFile);
// palette == true enables ffmpeg's two-pass palette pipeline (palettegen +
// paletteuse) for better GIF color quality; otherwise no video filter is used.
string filter = palette ? "-vf \"split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse\"" : "";
// "-f concat" reads the frame list as an ffmpeg concat demuxer script
// (presumably containing per-frame "duration" entries for VFR timing --
// TODO confirm against the code that writes the vfr-*.ini file).
string args = $"-f concat -i {vfrFilename.Wrap()} -f gif {filter} {outPath.Wrap()}";
await AvProcess.RunFfmpeg(args, framesFile.GetParentDir(), AvProcess.LogMode.OnlyLastLine);
}
public static async Task LoopVideo (string inputFile, int times, bool delSrc = false)
{
string pathNoExt = Path.ChangeExtension(inputFile, null);
string ext = Path.GetExtension(inputFile);
@@ -187,7 +195,7 @@ namespace Flowframes
DeleteSource(inputFile);
}
public static async Task LoopVideoEnc (string inputFile, int times, bool useH265, int crf, bool delSrc)
public static async Task LoopVideoEnc (string inputFile, int times, bool useH265, int crf, bool delSrc = false)
{
string pathNoExt = Path.ChangeExtension(inputFile, null);
string ext = Path.GetExtension(inputFile);
@@ -199,7 +207,7 @@ namespace Flowframes
DeleteSource(inputFile);
}
public static async Task ChangeSpeed (string inputFile, float newSpeedPercent, bool delSrc)
public static async Task ChangeSpeed (string inputFile, float newSpeedPercent, bool delSrc = false)
{
string pathNoExt = Path.ChangeExtension(inputFile, null);
string ext = Path.GetExtension(inputFile);

15
Code/Form1.Designer.cs generated
View File

@@ -700,7 +700,7 @@
this.updateBtn.ForeColor = System.Drawing.Color.White;
this.updateBtn.ImageIndex = 0;
this.updateBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.updateBtn.Location = new System.Drawing.Point(744, 12);
this.updateBtn.Location = new System.Drawing.Point(790, 12);
this.updateBtn.Name = "updateBtn";
this.updateBtn.Size = new System.Drawing.Size(40, 40);
this.updateBtn.TabIndex = 41;
@@ -718,7 +718,7 @@
this.queueBtn.ForeColor = System.Drawing.Color.White;
this.queueBtn.ImageIndex = 0;
this.queueBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.queueBtn.Location = new System.Drawing.Point(790, 12);
this.queueBtn.Location = new System.Drawing.Point(836, 12);
this.queueBtn.Name = "queueBtn";
this.queueBtn.Size = new System.Drawing.Size(40, 40);
this.queueBtn.TabIndex = 39;
@@ -840,7 +840,7 @@
this.patreonBtn.ForeColor = System.Drawing.Color.White;
this.patreonBtn.ImageIndex = 0;
this.patreonBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.patreonBtn.Location = new System.Drawing.Point(606, 12);
this.patreonBtn.Location = new System.Drawing.Point(652, 12);
this.patreonBtn.Name = "patreonBtn";
this.patreonBtn.Size = new System.Drawing.Size(40, 40);
this.patreonBtn.TabIndex = 37;
@@ -858,7 +858,7 @@
this.paypalBtn.ForeColor = System.Drawing.Color.White;
this.paypalBtn.ImageIndex = 0;
this.paypalBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.paypalBtn.Location = new System.Drawing.Point(559, 12);
this.paypalBtn.Location = new System.Drawing.Point(605, 12);
this.paypalBtn.Name = "paypalBtn";
this.paypalBtn.Size = new System.Drawing.Size(40, 40);
this.paypalBtn.TabIndex = 36;
@@ -876,7 +876,7 @@
this.discordBtn.ForeColor = System.Drawing.Color.White;
this.discordBtn.ImageIndex = 0;
this.discordBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.discordBtn.Location = new System.Drawing.Point(652, 12);
this.discordBtn.Location = new System.Drawing.Point(698, 12);
this.discordBtn.Name = "discordBtn";
this.discordBtn.Size = new System.Drawing.Size(40, 40);
this.discordBtn.TabIndex = 35;
@@ -894,12 +894,13 @@
this.installerBtn.ForeColor = System.Drawing.Color.White;
this.installerBtn.ImageIndex = 0;
this.installerBtn.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.installerBtn.Location = new System.Drawing.Point(836, 12);
this.installerBtn.Location = new System.Drawing.Point(468, 12);
this.installerBtn.Name = "installerBtn";
this.installerBtn.Size = new System.Drawing.Size(40, 40);
this.installerBtn.TabIndex = 9;
this.toolTip1.SetToolTip(this.installerBtn, "Open Package Installer");
this.installerBtn.UseVisualStyleBackColor = false;
this.installerBtn.Visible = false;
this.installerBtn.Click += new System.EventHandler(this.installerBtn_Click);
//
// longProgBar
@@ -1298,7 +1299,7 @@
this.htButton1.ForeColor = System.Drawing.Color.White;
this.htButton1.ImageIndex = 0;
this.htButton1.ImageSizeMode = HTAlt.WinForms.HTButton.ButtonImageSizeMode.Zoom;
this.htButton1.Location = new System.Drawing.Point(698, 12);
this.htButton1.Location = new System.Drawing.Point(744, 12);
this.htButton1.Name = "htButton1";
this.htButton1.Size = new System.Drawing.Size(40, 40);
this.htButton1.TabIndex = 40;

View File

@@ -66,14 +66,18 @@ namespace Flowframes.Main
static async Task Encode(i.OutMode mode, string framesPath, string outPath, float fps, float changeFps = -1, bool keepOriginalFpsVid = true)
{
string vfrFile = Path.Combine(framesPath.GetParentDir(), $"vfr-{i.lastInterpFactor}x.ini");
if (mode == i.OutMode.VidGif)
{
if (new DirectoryInfo(framesPath).GetFiles()[0].Extension != ".png")
{
Logger.Log("Converting output frames to PNG to encode with Gifski...");
await Converter.Convert(framesPath, ImageMagick.MagickFormat.Png00, 20, "png", false);
}
await GifskiCommands.CreateGifFromFrames(i.currentOutFps.RoundToInt(), Config.GetInt("gifskiQ"), framesPath, outPath);
await FFmpegCommands.FramesToGifVfr(vfrFile, outPath, true);
// TODO: Remove old code once new code works well
// if (new DirectoryInfo(framesPath).GetFiles()[0].Extension != ".png")
// {
// Logger.Log("Converting output frames to PNG to encode with Gifski...");
// await Converter.Convert(framesPath, ImageMagick.MagickFormat.Png00, 20, "png", false);
// }
// await GifskiCommands.CreateGifFromFrames(i.currentOutFps.RoundToInt(), Config.GetInt("gifskiQ"), framesPath, outPath);
}
if (mode == i.OutMode.VidMp4)
@@ -82,7 +86,6 @@ namespace Flowframes.Main
bool h265 = Config.GetInt("mp4Enc") == 1;
int crf = h265 ? Config.GetInt("h265Crf") : Config.GetInt("h264Crf");
string vfrFile = Path.Combine(framesPath.GetParentDir(), $"vfr-{i.lastInterpFactor}x.ini");
await FFmpegCommands.FramesToMp4Vfr(vfrFile, outPath, h265, crf, fps, i.constantFrameRate);
/* DELETE THIS AS SOON AS I'M SURE I CAN USE VFR WITH TIMING DISABLED

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,686 +0,0 @@
7767517
684 810
Input x.1 0 1 x.1
Input x.3 0 1 x.3
Reorg Reorg_13 1 1 x.1 523 0=8
Reorg Reorg_27 1 1 x.3 551 0=8
Concat Concat_28 2 1 523 551 552
Convolution Conv_29 1 1 552 553 0=192 1=3 4=1 5=1 6=663552
Split splitncnn_0 1 4 553 553_splitncnn_0 553_splitncnn_1 553_splitncnn_2 553_splitncnn_3
Padding Pad_43 1 1 553_splitncnn_3 576 0=1 1=1 2=1 3=1 4=2
Convolution Conv_44 1 1 576 578 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_59 1 1 578 601 0=1 1=1 2=1 3=1 4=2
Convolution Conv_60 1 1 601 602 0=192 1=3 5=1 6=331776
Split splitncnn_1 1 2 602 602_splitncnn_0 602_splitncnn_1
Pooling GlobalAveragePool_61 1 1 602_splitncnn_1 603 0=1 4=1
InnerProduct Conv_62 1 1 603 605 0=12 1=1 2=2304 9=1
InnerProduct Conv_64 1 1 605 607 0=192 1=1 2=2304 9=4
BinaryOp Mul_66 2 1 602_splitncnn_0 607 608 0=2
BinaryOp Add_67 2 1 608 553_splitncnn_2 609
Split splitncnn_2 1 2 609 609_splitncnn_0 609_splitncnn_1
Padding Pad_81 1 1 609_splitncnn_1 632 0=1 1=1 2=1 3=1 4=2
Convolution Conv_82 1 1 632 634 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_97 1 1 634 657 0=1 1=1 2=1 3=1 4=2
Convolution Conv_98 1 1 657 658 0=192 1=3 5=1 6=331776
Split splitncnn_3 1 2 658 658_splitncnn_0 658_splitncnn_1
Pooling GlobalAveragePool_99 1 1 658_splitncnn_1 659 0=1 4=1
InnerProduct Conv_100 1 1 659 661 0=12 1=1 2=2304 9=1
InnerProduct Conv_102 1 1 661 663 0=192 1=1 2=2304 9=4
BinaryOp Mul_104 2 1 658_splitncnn_0 663 664 0=2
BinaryOp Add_105 2 1 664 609_splitncnn_0 665
Split splitncnn_4 1 2 665 665_splitncnn_0 665_splitncnn_1
Padding Pad_119 1 1 665_splitncnn_1 688 0=1 1=1 2=1 3=1 4=2
Convolution Conv_120 1 1 688 690 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_135 1 1 690 713 0=1 1=1 2=1 3=1 4=2
Convolution Conv_136 1 1 713 714 0=192 1=3 5=1 6=331776
Split splitncnn_5 1 2 714 714_splitncnn_0 714_splitncnn_1
Pooling GlobalAveragePool_137 1 1 714_splitncnn_1 715 0=1 4=1
InnerProduct Conv_138 1 1 715 717 0=12 1=1 2=2304 9=1
InnerProduct Conv_140 1 1 717 719 0=192 1=1 2=2304 9=4
BinaryOp Mul_142 2 1 714_splitncnn_0 719 720 0=2
BinaryOp Add_143 2 1 720 665_splitncnn_0 721
Split splitncnn_6 1 2 721 721_splitncnn_0 721_splitncnn_1
Padding Pad_157 1 1 721_splitncnn_1 744 0=1 1=1 2=1 3=1 4=2
Convolution Conv_158 1 1 744 746 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_173 1 1 746 769 0=1 1=1 2=1 3=1 4=2
Convolution Conv_174 1 1 769 770 0=192 1=3 5=1 6=331776
Split splitncnn_7 1 2 770 770_splitncnn_0 770_splitncnn_1
Pooling GlobalAveragePool_175 1 1 770_splitncnn_1 771 0=1 4=1
InnerProduct Conv_176 1 1 771 773 0=12 1=1 2=2304 9=1
InnerProduct Conv_178 1 1 773 775 0=192 1=1 2=2304 9=4
BinaryOp Mul_180 2 1 770_splitncnn_0 775 776 0=2
BinaryOp Add_181 2 1 776 721_splitncnn_0 777
Split splitncnn_8 1 2 777 777_splitncnn_0 777_splitncnn_1
Padding Pad_195 1 1 777_splitncnn_1 800 0=1 1=1 2=1 3=1 4=2
Convolution Conv_196 1 1 800 802 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_211 1 1 802 825 0=1 1=1 2=1 3=1 4=2
Convolution Conv_212 1 1 825 826 0=192 1=3 5=1 6=331776
Split splitncnn_9 1 2 826 826_splitncnn_0 826_splitncnn_1
Pooling GlobalAveragePool_213 1 1 826_splitncnn_1 827 0=1 4=1
InnerProduct Conv_214 1 1 827 829 0=12 1=1 2=2304 9=1
InnerProduct Conv_216 1 1 829 831 0=192 1=1 2=2304 9=4
BinaryOp Mul_218 2 1 826_splitncnn_0 831 832 0=2
BinaryOp Add_219 2 1 832 777_splitncnn_0 833
Split splitncnn_10 1 2 833 833_splitncnn_0 833_splitncnn_1
Padding Pad_233 1 1 833_splitncnn_1 856 0=1 1=1 2=1 3=1 4=2
Convolution Conv_234 1 1 856 858 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_249 1 1 858 881 0=1 1=1 2=1 3=1 4=2
Convolution Conv_250 1 1 881 882 0=192 1=3 5=1 6=331776
Split splitncnn_11 1 2 882 882_splitncnn_0 882_splitncnn_1
Pooling GlobalAveragePool_251 1 1 882_splitncnn_1 883 0=1 4=1
InnerProduct Conv_252 1 1 883 885 0=12 1=1 2=2304 9=1
InnerProduct Conv_254 1 1 885 887 0=192 1=1 2=2304 9=4
BinaryOp Mul_256 2 1 882_splitncnn_0 887 888 0=2
BinaryOp Add_257 2 1 888 833_splitncnn_0 889
Split splitncnn_12 1 2 889 889_splitncnn_0 889_splitncnn_1
Padding Pad_271 1 1 889_splitncnn_1 912 0=1 1=1 2=1 3=1 4=2
Convolution Conv_272 1 1 912 914 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_287 1 1 914 937 0=1 1=1 2=1 3=1 4=2
Convolution Conv_288 1 1 937 938 0=192 1=3 5=1 6=331776
Split splitncnn_13 1 2 938 938_splitncnn_0 938_splitncnn_1
Pooling GlobalAveragePool_289 1 1 938_splitncnn_1 939 0=1 4=1
InnerProduct Conv_290 1 1 939 941 0=12 1=1 2=2304 9=1
InnerProduct Conv_292 1 1 941 943 0=192 1=1 2=2304 9=4
BinaryOp Mul_294 2 1 938_splitncnn_0 943 944 0=2
BinaryOp Add_295 2 1 944 889_splitncnn_0 945
Split splitncnn_14 1 2 945 945_splitncnn_0 945_splitncnn_1
Padding Pad_309 1 1 945_splitncnn_1 968 0=1 1=1 2=1 3=1 4=2
Convolution Conv_310 1 1 968 970 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_325 1 1 970 993 0=1 1=1 2=1 3=1 4=2
Convolution Conv_326 1 1 993 994 0=192 1=3 5=1 6=331776
Split splitncnn_15 1 2 994 994_splitncnn_0 994_splitncnn_1
Pooling GlobalAveragePool_327 1 1 994_splitncnn_1 995 0=1 4=1
InnerProduct Conv_328 1 1 995 997 0=12 1=1 2=2304 9=1
InnerProduct Conv_330 1 1 997 999 0=192 1=1 2=2304 9=4
BinaryOp Mul_332 2 1 994_splitncnn_0 999 1000 0=2
BinaryOp Add_333 2 1 1000 945_splitncnn_0 1001
Split splitncnn_16 1 2 1001 1001_splitncnn_0 1001_splitncnn_1
Padding Pad_347 1 1 1001_splitncnn_1 1024 0=1 1=1 2=1 3=1 4=2
Convolution Conv_348 1 1 1024 1026 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_363 1 1 1026 1049 0=1 1=1 2=1 3=1 4=2
Convolution Conv_364 1 1 1049 1050 0=192 1=3 5=1 6=331776
Split splitncnn_17 1 2 1050 1050_splitncnn_0 1050_splitncnn_1
Pooling GlobalAveragePool_365 1 1 1050_splitncnn_1 1051 0=1 4=1
InnerProduct Conv_366 1 1 1051 1053 0=12 1=1 2=2304 9=1
InnerProduct Conv_368 1 1 1053 1055 0=192 1=1 2=2304 9=4
BinaryOp Mul_370 2 1 1050_splitncnn_0 1055 1056 0=2
BinaryOp Add_371 2 1 1056 1001_splitncnn_0 1057
Split splitncnn_18 1 2 1057 1057_splitncnn_0 1057_splitncnn_1
Padding Pad_385 1 1 1057_splitncnn_1 1080 0=1 1=1 2=1 3=1 4=2
Convolution Conv_386 1 1 1080 1082 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_401 1 1 1082 1105 0=1 1=1 2=1 3=1 4=2
Convolution Conv_402 1 1 1105 1106 0=192 1=3 5=1 6=331776
Split splitncnn_19 1 2 1106 1106_splitncnn_0 1106_splitncnn_1
Pooling GlobalAveragePool_403 1 1 1106_splitncnn_1 1107 0=1 4=1
InnerProduct Conv_404 1 1 1107 1109 0=12 1=1 2=2304 9=1
InnerProduct Conv_406 1 1 1109 1111 0=192 1=1 2=2304 9=4
BinaryOp Mul_408 2 1 1106_splitncnn_0 1111 1112 0=2
BinaryOp Add_409 2 1 1112 1057_splitncnn_0 1113
Split splitncnn_20 1 2 1113 1113_splitncnn_0 1113_splitncnn_1
Padding Pad_423 1 1 1113_splitncnn_1 1136 0=1 1=1 2=1 3=1 4=2
Convolution Conv_424 1 1 1136 1138 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_439 1 1 1138 1161 0=1 1=1 2=1 3=1 4=2
Convolution Conv_440 1 1 1161 1162 0=192 1=3 5=1 6=331776
Split splitncnn_21 1 2 1162 1162_splitncnn_0 1162_splitncnn_1
Pooling GlobalAveragePool_441 1 1 1162_splitncnn_1 1163 0=1 4=1
InnerProduct Conv_442 1 1 1163 1165 0=12 1=1 2=2304 9=1
InnerProduct Conv_444 1 1 1165 1167 0=192 1=1 2=2304 9=4
BinaryOp Mul_446 2 1 1162_splitncnn_0 1167 1168 0=2
BinaryOp Add_447 2 1 1168 1113_splitncnn_0 1169
Split splitncnn_22 1 2 1169 1169_splitncnn_0 1169_splitncnn_1
Padding Pad_461 1 1 1169_splitncnn_1 1192 0=1 1=1 2=1 3=1 4=2
Convolution Conv_462 1 1 1192 1194 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_477 1 1 1194 1217 0=1 1=1 2=1 3=1 4=2
Convolution Conv_478 1 1 1217 1218 0=192 1=3 5=1 6=331776
Split splitncnn_23 1 2 1218 1218_splitncnn_0 1218_splitncnn_1
Pooling GlobalAveragePool_479 1 1 1218_splitncnn_1 1219 0=1 4=1
InnerProduct Conv_480 1 1 1219 1221 0=12 1=1 2=2304 9=1
InnerProduct Conv_482 1 1 1221 1223 0=192 1=1 2=2304 9=4
BinaryOp Mul_484 2 1 1218_splitncnn_0 1223 1224 0=2
BinaryOp Add_485 2 1 1224 1169_splitncnn_0 1225
Padding Pad_499 1 1 1225 1248 0=1 1=1 2=1 3=1 4=2
Convolution Conv_500 1 1 1248 1249 0=192 1=3 5=1 6=331776
BinaryOp Add_501 2 1 1249 553_splitncnn_1 1250
Split splitncnn_24 1 3 1250 1250_splitncnn_0 1250_splitncnn_1 1250_splitncnn_2
Padding Pad_515 1 1 1250_splitncnn_2 1273 0=1 1=1 2=1 3=1 4=2
Convolution Conv_516 1 1 1273 1275 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_531 1 1 1275 1298 0=1 1=1 2=1 3=1 4=2
Convolution Conv_532 1 1 1298 1299 0=192 1=3 5=1 6=331776
Split splitncnn_25 1 2 1299 1299_splitncnn_0 1299_splitncnn_1
Pooling GlobalAveragePool_533 1 1 1299_splitncnn_1 1300 0=1 4=1
InnerProduct Conv_534 1 1 1300 1302 0=12 1=1 2=2304 9=1
InnerProduct Conv_536 1 1 1302 1304 0=192 1=1 2=2304 9=4
BinaryOp Mul_538 2 1 1299_splitncnn_0 1304 1305 0=2
BinaryOp Add_539 2 1 1305 1250_splitncnn_1 1306
Split splitncnn_26 1 2 1306 1306_splitncnn_0 1306_splitncnn_1
Padding Pad_553 1 1 1306_splitncnn_1 1329 0=1 1=1 2=1 3=1 4=2
Convolution Conv_554 1 1 1329 1331 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_569 1 1 1331 1354 0=1 1=1 2=1 3=1 4=2
Convolution Conv_570 1 1 1354 1355 0=192 1=3 5=1 6=331776
Split splitncnn_27 1 2 1355 1355_splitncnn_0 1355_splitncnn_1
Pooling GlobalAveragePool_571 1 1 1355_splitncnn_1 1356 0=1 4=1
InnerProduct Conv_572 1 1 1356 1358 0=12 1=1 2=2304 9=1
InnerProduct Conv_574 1 1 1358 1360 0=192 1=1 2=2304 9=4
BinaryOp Mul_576 2 1 1355_splitncnn_0 1360 1361 0=2
BinaryOp Add_577 2 1 1361 1306_splitncnn_0 1362
Split splitncnn_28 1 2 1362 1362_splitncnn_0 1362_splitncnn_1
Padding Pad_591 1 1 1362_splitncnn_1 1385 0=1 1=1 2=1 3=1 4=2
Convolution Conv_592 1 1 1385 1387 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_607 1 1 1387 1410 0=1 1=1 2=1 3=1 4=2
Convolution Conv_608 1 1 1410 1411 0=192 1=3 5=1 6=331776
Split splitncnn_29 1 2 1411 1411_splitncnn_0 1411_splitncnn_1
Pooling GlobalAveragePool_609 1 1 1411_splitncnn_1 1412 0=1 4=1
InnerProduct Conv_610 1 1 1412 1414 0=12 1=1 2=2304 9=1
InnerProduct Conv_612 1 1 1414 1416 0=192 1=1 2=2304 9=4
BinaryOp Mul_614 2 1 1411_splitncnn_0 1416 1417 0=2
BinaryOp Add_615 2 1 1417 1362_splitncnn_0 1418
Split splitncnn_30 1 2 1418 1418_splitncnn_0 1418_splitncnn_1
Padding Pad_629 1 1 1418_splitncnn_1 1441 0=1 1=1 2=1 3=1 4=2
Convolution Conv_630 1 1 1441 1443 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_645 1 1 1443 1466 0=1 1=1 2=1 3=1 4=2
Convolution Conv_646 1 1 1466 1467 0=192 1=3 5=1 6=331776
Split splitncnn_31 1 2 1467 1467_splitncnn_0 1467_splitncnn_1
Pooling GlobalAveragePool_647 1 1 1467_splitncnn_1 1468 0=1 4=1
InnerProduct Conv_648 1 1 1468 1470 0=12 1=1 2=2304 9=1
InnerProduct Conv_650 1 1 1470 1472 0=192 1=1 2=2304 9=4
BinaryOp Mul_652 2 1 1467_splitncnn_0 1472 1473 0=2
BinaryOp Add_653 2 1 1473 1418_splitncnn_0 1474
Split splitncnn_32 1 2 1474 1474_splitncnn_0 1474_splitncnn_1
Padding Pad_667 1 1 1474_splitncnn_1 1497 0=1 1=1 2=1 3=1 4=2
Convolution Conv_668 1 1 1497 1499 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_683 1 1 1499 1522 0=1 1=1 2=1 3=1 4=2
Convolution Conv_684 1 1 1522 1523 0=192 1=3 5=1 6=331776
Split splitncnn_33 1 2 1523 1523_splitncnn_0 1523_splitncnn_1
Pooling GlobalAveragePool_685 1 1 1523_splitncnn_1 1524 0=1 4=1
InnerProduct Conv_686 1 1 1524 1526 0=12 1=1 2=2304 9=1
InnerProduct Conv_688 1 1 1526 1528 0=192 1=1 2=2304 9=4
BinaryOp Mul_690 2 1 1523_splitncnn_0 1528 1529 0=2
BinaryOp Add_691 2 1 1529 1474_splitncnn_0 1530
Split splitncnn_34 1 2 1530 1530_splitncnn_0 1530_splitncnn_1
Padding Pad_705 1 1 1530_splitncnn_1 1553 0=1 1=1 2=1 3=1 4=2
Convolution Conv_706 1 1 1553 1555 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_721 1 1 1555 1578 0=1 1=1 2=1 3=1 4=2
Convolution Conv_722 1 1 1578 1579 0=192 1=3 5=1 6=331776
Split splitncnn_35 1 2 1579 1579_splitncnn_0 1579_splitncnn_1
Pooling GlobalAveragePool_723 1 1 1579_splitncnn_1 1580 0=1 4=1
InnerProduct Conv_724 1 1 1580 1582 0=12 1=1 2=2304 9=1
InnerProduct Conv_726 1 1 1582 1584 0=192 1=1 2=2304 9=4
BinaryOp Mul_728 2 1 1579_splitncnn_0 1584 1585 0=2
BinaryOp Add_729 2 1 1585 1530_splitncnn_0 1586
Split splitncnn_36 1 2 1586 1586_splitncnn_0 1586_splitncnn_1
Padding Pad_743 1 1 1586_splitncnn_1 1609 0=1 1=1 2=1 3=1 4=2
Convolution Conv_744 1 1 1609 1611 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_759 1 1 1611 1634 0=1 1=1 2=1 3=1 4=2
Convolution Conv_760 1 1 1634 1635 0=192 1=3 5=1 6=331776
Split splitncnn_37 1 2 1635 1635_splitncnn_0 1635_splitncnn_1
Pooling GlobalAveragePool_761 1 1 1635_splitncnn_1 1636 0=1 4=1
InnerProduct Conv_762 1 1 1636 1638 0=12 1=1 2=2304 9=1
InnerProduct Conv_764 1 1 1638 1640 0=192 1=1 2=2304 9=4
BinaryOp Mul_766 2 1 1635_splitncnn_0 1640 1641 0=2
BinaryOp Add_767 2 1 1641 1586_splitncnn_0 1642
Split splitncnn_38 1 2 1642 1642_splitncnn_0 1642_splitncnn_1
Padding Pad_781 1 1 1642_splitncnn_1 1665 0=1 1=1 2=1 3=1 4=2
Convolution Conv_782 1 1 1665 1667 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_797 1 1 1667 1690 0=1 1=1 2=1 3=1 4=2
Convolution Conv_798 1 1 1690 1691 0=192 1=3 5=1 6=331776
Split splitncnn_39 1 2 1691 1691_splitncnn_0 1691_splitncnn_1
Pooling GlobalAveragePool_799 1 1 1691_splitncnn_1 1692 0=1 4=1
InnerProduct Conv_800 1 1 1692 1694 0=12 1=1 2=2304 9=1
InnerProduct Conv_802 1 1 1694 1696 0=192 1=1 2=2304 9=4
BinaryOp Mul_804 2 1 1691_splitncnn_0 1696 1697 0=2
BinaryOp Add_805 2 1 1697 1642_splitncnn_0 1698
Split splitncnn_40 1 2 1698 1698_splitncnn_0 1698_splitncnn_1
Padding Pad_819 1 1 1698_splitncnn_1 1721 0=1 1=1 2=1 3=1 4=2
Convolution Conv_820 1 1 1721 1723 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_835 1 1 1723 1746 0=1 1=1 2=1 3=1 4=2
Convolution Conv_836 1 1 1746 1747 0=192 1=3 5=1 6=331776
Split splitncnn_41 1 2 1747 1747_splitncnn_0 1747_splitncnn_1
Pooling GlobalAveragePool_837 1 1 1747_splitncnn_1 1748 0=1 4=1
InnerProduct Conv_838 1 1 1748 1750 0=12 1=1 2=2304 9=1
InnerProduct Conv_840 1 1 1750 1752 0=192 1=1 2=2304 9=4
BinaryOp Mul_842 2 1 1747_splitncnn_0 1752 1753 0=2
BinaryOp Add_843 2 1 1753 1698_splitncnn_0 1754
Split splitncnn_42 1 2 1754 1754_splitncnn_0 1754_splitncnn_1
Padding Pad_857 1 1 1754_splitncnn_1 1777 0=1 1=1 2=1 3=1 4=2
Convolution Conv_858 1 1 1777 1779 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_873 1 1 1779 1802 0=1 1=1 2=1 3=1 4=2
Convolution Conv_874 1 1 1802 1803 0=192 1=3 5=1 6=331776
Split splitncnn_43 1 2 1803 1803_splitncnn_0 1803_splitncnn_1
Pooling GlobalAveragePool_875 1 1 1803_splitncnn_1 1804 0=1 4=1
InnerProduct Conv_876 1 1 1804 1806 0=12 1=1 2=2304 9=1
InnerProduct Conv_878 1 1 1806 1808 0=192 1=1 2=2304 9=4
BinaryOp Mul_880 2 1 1803_splitncnn_0 1808 1809 0=2
BinaryOp Add_881 2 1 1809 1754_splitncnn_0 1810
Split splitncnn_44 1 2 1810 1810_splitncnn_0 1810_splitncnn_1
Padding Pad_895 1 1 1810_splitncnn_1 1833 0=1 1=1 2=1 3=1 4=2
Convolution Conv_896 1 1 1833 1835 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_911 1 1 1835 1858 0=1 1=1 2=1 3=1 4=2
Convolution Conv_912 1 1 1858 1859 0=192 1=3 5=1 6=331776
Split splitncnn_45 1 2 1859 1859_splitncnn_0 1859_splitncnn_1
Pooling GlobalAveragePool_913 1 1 1859_splitncnn_1 1860 0=1 4=1
InnerProduct Conv_914 1 1 1860 1862 0=12 1=1 2=2304 9=1
InnerProduct Conv_916 1 1 1862 1864 0=192 1=1 2=2304 9=4
BinaryOp Mul_918 2 1 1859_splitncnn_0 1864 1865 0=2
BinaryOp Add_919 2 1 1865 1810_splitncnn_0 1866
Split splitncnn_46 1 2 1866 1866_splitncnn_0 1866_splitncnn_1
Padding Pad_933 1 1 1866_splitncnn_1 1889 0=1 1=1 2=1 3=1 4=2
Convolution Conv_934 1 1 1889 1891 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_949 1 1 1891 1914 0=1 1=1 2=1 3=1 4=2
Convolution Conv_950 1 1 1914 1915 0=192 1=3 5=1 6=331776
Split splitncnn_47 1 2 1915 1915_splitncnn_0 1915_splitncnn_1
Pooling GlobalAveragePool_951 1 1 1915_splitncnn_1 1916 0=1 4=1
InnerProduct Conv_952 1 1 1916 1918 0=12 1=1 2=2304 9=1
InnerProduct Conv_954 1 1 1918 1920 0=192 1=1 2=2304 9=4
BinaryOp Mul_956 2 1 1915_splitncnn_0 1920 1921 0=2
BinaryOp Add_957 2 1 1921 1866_splitncnn_0 1922
Padding Pad_971 1 1 1922 1945 0=1 1=1 2=1 3=1 4=2
Convolution Conv_972 1 1 1945 1946 0=192 1=3 5=1 6=331776
BinaryOp Add_973 2 1 1946 1250_splitncnn_0 1947
Split splitncnn_48 1 3 1947 1947_splitncnn_0 1947_splitncnn_1 1947_splitncnn_2
Padding Pad_987 1 1 1947_splitncnn_2 1970 0=1 1=1 2=1 3=1 4=2
Convolution Conv_988 1 1 1970 1972 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1003 1 1 1972 1995 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1004 1 1 1995 1996 0=192 1=3 5=1 6=331776
Split splitncnn_49 1 2 1996 1996_splitncnn_0 1996_splitncnn_1
Pooling GlobalAveragePool_1005 1 1 1996_splitncnn_1 1997 0=1 4=1
InnerProduct Conv_1006 1 1 1997 1999 0=12 1=1 2=2304 9=1
InnerProduct Conv_1008 1 1 1999 2001 0=192 1=1 2=2304 9=4
BinaryOp Mul_1010 2 1 1996_splitncnn_0 2001 2002 0=2
BinaryOp Add_1011 2 1 2002 1947_splitncnn_1 2003
Split splitncnn_50 1 2 2003 2003_splitncnn_0 2003_splitncnn_1
Padding Pad_1025 1 1 2003_splitncnn_1 2026 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1026 1 1 2026 2028 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1041 1 1 2028 2051 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1042 1 1 2051 2052 0=192 1=3 5=1 6=331776
Split splitncnn_51 1 2 2052 2052_splitncnn_0 2052_splitncnn_1
Pooling GlobalAveragePool_1043 1 1 2052_splitncnn_1 2053 0=1 4=1
InnerProduct Conv_1044 1 1 2053 2055 0=12 1=1 2=2304 9=1
InnerProduct Conv_1046 1 1 2055 2057 0=192 1=1 2=2304 9=4
BinaryOp Mul_1048 2 1 2052_splitncnn_0 2057 2058 0=2
BinaryOp Add_1049 2 1 2058 2003_splitncnn_0 2059
Split splitncnn_52 1 2 2059 2059_splitncnn_0 2059_splitncnn_1
Padding Pad_1063 1 1 2059_splitncnn_1 2082 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1064 1 1 2082 2084 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1079 1 1 2084 2107 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1080 1 1 2107 2108 0=192 1=3 5=1 6=331776
Split splitncnn_53 1 2 2108 2108_splitncnn_0 2108_splitncnn_1
Pooling GlobalAveragePool_1081 1 1 2108_splitncnn_1 2109 0=1 4=1
InnerProduct Conv_1082 1 1 2109 2111 0=12 1=1 2=2304 9=1
InnerProduct Conv_1084 1 1 2111 2113 0=192 1=1 2=2304 9=4
BinaryOp Mul_1086 2 1 2108_splitncnn_0 2113 2114 0=2
BinaryOp Add_1087 2 1 2114 2059_splitncnn_0 2115
Split splitncnn_54 1 2 2115 2115_splitncnn_0 2115_splitncnn_1
Padding Pad_1101 1 1 2115_splitncnn_1 2138 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1102 1 1 2138 2140 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1117 1 1 2140 2163 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1118 1 1 2163 2164 0=192 1=3 5=1 6=331776
Split splitncnn_55 1 2 2164 2164_splitncnn_0 2164_splitncnn_1
Pooling GlobalAveragePool_1119 1 1 2164_splitncnn_1 2165 0=1 4=1
InnerProduct Conv_1120 1 1 2165 2167 0=12 1=1 2=2304 9=1
InnerProduct Conv_1122 1 1 2167 2169 0=192 1=1 2=2304 9=4
BinaryOp Mul_1124 2 1 2164_splitncnn_0 2169 2170 0=2
BinaryOp Add_1125 2 1 2170 2115_splitncnn_0 2171
Split splitncnn_56 1 2 2171 2171_splitncnn_0 2171_splitncnn_1
Padding Pad_1139 1 1 2171_splitncnn_1 2194 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1140 1 1 2194 2196 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1155 1 1 2196 2219 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1156 1 1 2219 2220 0=192 1=3 5=1 6=331776
Split splitncnn_57 1 2 2220 2220_splitncnn_0 2220_splitncnn_1
Pooling GlobalAveragePool_1157 1 1 2220_splitncnn_1 2221 0=1 4=1
InnerProduct Conv_1158 1 1 2221 2223 0=12 1=1 2=2304 9=1
InnerProduct Conv_1160 1 1 2223 2225 0=192 1=1 2=2304 9=4
BinaryOp Mul_1162 2 1 2220_splitncnn_0 2225 2226 0=2
BinaryOp Add_1163 2 1 2226 2171_splitncnn_0 2227
Split splitncnn_58 1 2 2227 2227_splitncnn_0 2227_splitncnn_1
Padding Pad_1177 1 1 2227_splitncnn_1 2250 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1178 1 1 2250 2252 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1193 1 1 2252 2275 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1194 1 1 2275 2276 0=192 1=3 5=1 6=331776
Split splitncnn_59 1 2 2276 2276_splitncnn_0 2276_splitncnn_1
Pooling GlobalAveragePool_1195 1 1 2276_splitncnn_1 2277 0=1 4=1
InnerProduct Conv_1196 1 1 2277 2279 0=12 1=1 2=2304 9=1
InnerProduct Conv_1198 1 1 2279 2281 0=192 1=1 2=2304 9=4
BinaryOp Mul_1200 2 1 2276_splitncnn_0 2281 2282 0=2
BinaryOp Add_1201 2 1 2282 2227_splitncnn_0 2283
Split splitncnn_60 1 2 2283 2283_splitncnn_0 2283_splitncnn_1
Padding Pad_1215 1 1 2283_splitncnn_1 2306 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1216 1 1 2306 2308 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1231 1 1 2308 2331 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1232 1 1 2331 2332 0=192 1=3 5=1 6=331776
Split splitncnn_61 1 2 2332 2332_splitncnn_0 2332_splitncnn_1
Pooling GlobalAveragePool_1233 1 1 2332_splitncnn_1 2333 0=1 4=1
InnerProduct Conv_1234 1 1 2333 2335 0=12 1=1 2=2304 9=1
InnerProduct Conv_1236 1 1 2335 2337 0=192 1=1 2=2304 9=4
BinaryOp Mul_1238 2 1 2332_splitncnn_0 2337 2338 0=2
BinaryOp Add_1239 2 1 2338 2283_splitncnn_0 2339
Split splitncnn_62 1 2 2339 2339_splitncnn_0 2339_splitncnn_1
Padding Pad_1253 1 1 2339_splitncnn_1 2362 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1254 1 1 2362 2364 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1269 1 1 2364 2387 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1270 1 1 2387 2388 0=192 1=3 5=1 6=331776
Split splitncnn_63 1 2 2388 2388_splitncnn_0 2388_splitncnn_1
Pooling GlobalAveragePool_1271 1 1 2388_splitncnn_1 2389 0=1 4=1
InnerProduct Conv_1272 1 1 2389 2391 0=12 1=1 2=2304 9=1
InnerProduct Conv_1274 1 1 2391 2393 0=192 1=1 2=2304 9=4
BinaryOp Mul_1276 2 1 2388_splitncnn_0 2393 2394 0=2
BinaryOp Add_1277 2 1 2394 2339_splitncnn_0 2395
Split splitncnn_64 1 2 2395 2395_splitncnn_0 2395_splitncnn_1
Padding Pad_1291 1 1 2395_splitncnn_1 2418 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1292 1 1 2418 2420 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1307 1 1 2420 2443 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1308 1 1 2443 2444 0=192 1=3 5=1 6=331776
Split splitncnn_65 1 2 2444 2444_splitncnn_0 2444_splitncnn_1
Pooling GlobalAveragePool_1309 1 1 2444_splitncnn_1 2445 0=1 4=1
InnerProduct Conv_1310 1 1 2445 2447 0=12 1=1 2=2304 9=1
InnerProduct Conv_1312 1 1 2447 2449 0=192 1=1 2=2304 9=4
BinaryOp Mul_1314 2 1 2444_splitncnn_0 2449 2450 0=2
BinaryOp Add_1315 2 1 2450 2395_splitncnn_0 2451
Split splitncnn_66 1 2 2451 2451_splitncnn_0 2451_splitncnn_1
Padding Pad_1329 1 1 2451_splitncnn_1 2474 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1330 1 1 2474 2476 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1345 1 1 2476 2499 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1346 1 1 2499 2500 0=192 1=3 5=1 6=331776
Split splitncnn_67 1 2 2500 2500_splitncnn_0 2500_splitncnn_1
Pooling GlobalAveragePool_1347 1 1 2500_splitncnn_1 2501 0=1 4=1
InnerProduct Conv_1348 1 1 2501 2503 0=12 1=1 2=2304 9=1
InnerProduct Conv_1350 1 1 2503 2505 0=192 1=1 2=2304 9=4
BinaryOp Mul_1352 2 1 2500_splitncnn_0 2505 2506 0=2
BinaryOp Add_1353 2 1 2506 2451_splitncnn_0 2507
Split splitncnn_68 1 2 2507 2507_splitncnn_0 2507_splitncnn_1
Padding Pad_1367 1 1 2507_splitncnn_1 2530 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1368 1 1 2530 2532 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1383 1 1 2532 2555 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1384 1 1 2555 2556 0=192 1=3 5=1 6=331776
Split splitncnn_69 1 2 2556 2556_splitncnn_0 2556_splitncnn_1
Pooling GlobalAveragePool_1385 1 1 2556_splitncnn_1 2557 0=1 4=1
InnerProduct Conv_1386 1 1 2557 2559 0=12 1=1 2=2304 9=1
InnerProduct Conv_1388 1 1 2559 2561 0=192 1=1 2=2304 9=4
BinaryOp Mul_1390 2 1 2556_splitncnn_0 2561 2562 0=2
BinaryOp Add_1391 2 1 2562 2507_splitncnn_0 2563
Split splitncnn_70 1 2 2563 2563_splitncnn_0 2563_splitncnn_1
Padding Pad_1405 1 1 2563_splitncnn_1 2586 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1406 1 1 2586 2588 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1421 1 1 2588 2611 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1422 1 1 2611 2612 0=192 1=3 5=1 6=331776
Split splitncnn_71 1 2 2612 2612_splitncnn_0 2612_splitncnn_1
Pooling GlobalAveragePool_1423 1 1 2612_splitncnn_1 2613 0=1 4=1
InnerProduct Conv_1424 1 1 2613 2615 0=12 1=1 2=2304 9=1
InnerProduct Conv_1426 1 1 2615 2617 0=192 1=1 2=2304 9=4
BinaryOp Mul_1428 2 1 2612_splitncnn_0 2617 2618 0=2
BinaryOp Add_1429 2 1 2618 2563_splitncnn_0 2619
Padding Pad_1443 1 1 2619 2642 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1444 1 1 2642 2643 0=192 1=3 5=1 6=331776
BinaryOp Add_1445 2 1 2643 1947_splitncnn_0 2644
Split splitncnn_72 1 3 2644 2644_splitncnn_0 2644_splitncnn_1 2644_splitncnn_2
Padding Pad_1459 1 1 2644_splitncnn_2 2667 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1460 1 1 2667 2669 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1475 1 1 2669 2692 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1476 1 1 2692 2693 0=192 1=3 5=1 6=331776
Split splitncnn_73 1 2 2693 2693_splitncnn_0 2693_splitncnn_1
Pooling GlobalAveragePool_1477 1 1 2693_splitncnn_1 2694 0=1 4=1
InnerProduct Conv_1478 1 1 2694 2696 0=12 1=1 2=2304 9=1
InnerProduct Conv_1480 1 1 2696 2698 0=192 1=1 2=2304 9=4
BinaryOp Mul_1482 2 1 2693_splitncnn_0 2698 2699 0=2
BinaryOp Add_1483 2 1 2699 2644_splitncnn_1 2700
Split splitncnn_74 1 2 2700 2700_splitncnn_0 2700_splitncnn_1
Padding Pad_1497 1 1 2700_splitncnn_1 2723 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1498 1 1 2723 2725 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1513 1 1 2725 2748 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1514 1 1 2748 2749 0=192 1=3 5=1 6=331776
Split splitncnn_75 1 2 2749 2749_splitncnn_0 2749_splitncnn_1
Pooling GlobalAveragePool_1515 1 1 2749_splitncnn_1 2750 0=1 4=1
InnerProduct Conv_1516 1 1 2750 2752 0=12 1=1 2=2304 9=1
InnerProduct Conv_1518 1 1 2752 2754 0=192 1=1 2=2304 9=4
BinaryOp Mul_1520 2 1 2749_splitncnn_0 2754 2755 0=2
BinaryOp Add_1521 2 1 2755 2700_splitncnn_0 2756
Split splitncnn_76 1 2 2756 2756_splitncnn_0 2756_splitncnn_1
Padding Pad_1535 1 1 2756_splitncnn_1 2779 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1536 1 1 2779 2781 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1551 1 1 2781 2804 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1552 1 1 2804 2805 0=192 1=3 5=1 6=331776
Split splitncnn_77 1 2 2805 2805_splitncnn_0 2805_splitncnn_1
Pooling GlobalAveragePool_1553 1 1 2805_splitncnn_1 2806 0=1 4=1
InnerProduct Conv_1554 1 1 2806 2808 0=12 1=1 2=2304 9=1
InnerProduct Conv_1556 1 1 2808 2810 0=192 1=1 2=2304 9=4
BinaryOp Mul_1558 2 1 2805_splitncnn_0 2810 2811 0=2
BinaryOp Add_1559 2 1 2811 2756_splitncnn_0 2812
Split splitncnn_78 1 2 2812 2812_splitncnn_0 2812_splitncnn_1
Padding Pad_1573 1 1 2812_splitncnn_1 2835 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1574 1 1 2835 2837 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1589 1 1 2837 2860 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1590 1 1 2860 2861 0=192 1=3 5=1 6=331776
Split splitncnn_79 1 2 2861 2861_splitncnn_0 2861_splitncnn_1
Pooling GlobalAveragePool_1591 1 1 2861_splitncnn_1 2862 0=1 4=1
InnerProduct Conv_1592 1 1 2862 2864 0=12 1=1 2=2304 9=1
InnerProduct Conv_1594 1 1 2864 2866 0=192 1=1 2=2304 9=4
BinaryOp Mul_1596 2 1 2861_splitncnn_0 2866 2867 0=2
BinaryOp Add_1597 2 1 2867 2812_splitncnn_0 2868
Split splitncnn_80 1 2 2868 2868_splitncnn_0 2868_splitncnn_1
Padding Pad_1611 1 1 2868_splitncnn_1 2891 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1612 1 1 2891 2893 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1627 1 1 2893 2916 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1628 1 1 2916 2917 0=192 1=3 5=1 6=331776
Split splitncnn_81 1 2 2917 2917_splitncnn_0 2917_splitncnn_1
Pooling GlobalAveragePool_1629 1 1 2917_splitncnn_1 2918 0=1 4=1
InnerProduct Conv_1630 1 1 2918 2920 0=12 1=1 2=2304 9=1
InnerProduct Conv_1632 1 1 2920 2922 0=192 1=1 2=2304 9=4
BinaryOp Mul_1634 2 1 2917_splitncnn_0 2922 2923 0=2
BinaryOp Add_1635 2 1 2923 2868_splitncnn_0 2924
Split splitncnn_82 1 2 2924 2924_splitncnn_0 2924_splitncnn_1
Padding Pad_1649 1 1 2924_splitncnn_1 2947 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1650 1 1 2947 2949 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1665 1 1 2949 2972 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1666 1 1 2972 2973 0=192 1=3 5=1 6=331776
Split splitncnn_83 1 2 2973 2973_splitncnn_0 2973_splitncnn_1
Pooling GlobalAveragePool_1667 1 1 2973_splitncnn_1 2974 0=1 4=1
InnerProduct Conv_1668 1 1 2974 2976 0=12 1=1 2=2304 9=1
InnerProduct Conv_1670 1 1 2976 2978 0=192 1=1 2=2304 9=4
BinaryOp Mul_1672 2 1 2973_splitncnn_0 2978 2979 0=2
BinaryOp Add_1673 2 1 2979 2924_splitncnn_0 2980
Split splitncnn_84 1 2 2980 2980_splitncnn_0 2980_splitncnn_1
Padding Pad_1687 1 1 2980_splitncnn_1 3003 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1688 1 1 3003 3005 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1703 1 1 3005 3028 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1704 1 1 3028 3029 0=192 1=3 5=1 6=331776
Split splitncnn_85 1 2 3029 3029_splitncnn_0 3029_splitncnn_1
Pooling GlobalAveragePool_1705 1 1 3029_splitncnn_1 3030 0=1 4=1
InnerProduct Conv_1706 1 1 3030 3032 0=12 1=1 2=2304 9=1
InnerProduct Conv_1708 1 1 3032 3034 0=192 1=1 2=2304 9=4
BinaryOp Mul_1710 2 1 3029_splitncnn_0 3034 3035 0=2
BinaryOp Add_1711 2 1 3035 2980_splitncnn_0 3036
Split splitncnn_86 1 2 3036 3036_splitncnn_0 3036_splitncnn_1
Padding Pad_1725 1 1 3036_splitncnn_1 3059 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1726 1 1 3059 3061 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1741 1 1 3061 3084 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1742 1 1 3084 3085 0=192 1=3 5=1 6=331776
Split splitncnn_87 1 2 3085 3085_splitncnn_0 3085_splitncnn_1
Pooling GlobalAveragePool_1743 1 1 3085_splitncnn_1 3086 0=1 4=1
InnerProduct Conv_1744 1 1 3086 3088 0=12 1=1 2=2304 9=1
InnerProduct Conv_1746 1 1 3088 3090 0=192 1=1 2=2304 9=4
BinaryOp Mul_1748 2 1 3085_splitncnn_0 3090 3091 0=2
BinaryOp Add_1749 2 1 3091 3036_splitncnn_0 3092
Split splitncnn_88 1 2 3092 3092_splitncnn_0 3092_splitncnn_1
Padding Pad_1763 1 1 3092_splitncnn_1 3115 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1764 1 1 3115 3117 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1779 1 1 3117 3140 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1780 1 1 3140 3141 0=192 1=3 5=1 6=331776
Split splitncnn_89 1 2 3141 3141_splitncnn_0 3141_splitncnn_1
Pooling GlobalAveragePool_1781 1 1 3141_splitncnn_1 3142 0=1 4=1
InnerProduct Conv_1782 1 1 3142 3144 0=12 1=1 2=2304 9=1
InnerProduct Conv_1784 1 1 3144 3146 0=192 1=1 2=2304 9=4
BinaryOp Mul_1786 2 1 3141_splitncnn_0 3146 3147 0=2
BinaryOp Add_1787 2 1 3147 3092_splitncnn_0 3148
Split splitncnn_90 1 2 3148 3148_splitncnn_0 3148_splitncnn_1
Padding Pad_1801 1 1 3148_splitncnn_1 3171 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1802 1 1 3171 3173 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1817 1 1 3173 3196 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1818 1 1 3196 3197 0=192 1=3 5=1 6=331776
Split splitncnn_91 1 2 3197 3197_splitncnn_0 3197_splitncnn_1
Pooling GlobalAveragePool_1819 1 1 3197_splitncnn_1 3198 0=1 4=1
InnerProduct Conv_1820 1 1 3198 3200 0=12 1=1 2=2304 9=1
InnerProduct Conv_1822 1 1 3200 3202 0=192 1=1 2=2304 9=4
BinaryOp Mul_1824 2 1 3197_splitncnn_0 3202 3203 0=2
BinaryOp Add_1825 2 1 3203 3148_splitncnn_0 3204
Split splitncnn_92 1 2 3204 3204_splitncnn_0 3204_splitncnn_1
Padding Pad_1839 1 1 3204_splitncnn_1 3227 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1840 1 1 3227 3229 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1855 1 1 3229 3252 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1856 1 1 3252 3253 0=192 1=3 5=1 6=331776
Split splitncnn_93 1 2 3253 3253_splitncnn_0 3253_splitncnn_1
Pooling GlobalAveragePool_1857 1 1 3253_splitncnn_1 3254 0=1 4=1
InnerProduct Conv_1858 1 1 3254 3256 0=12 1=1 2=2304 9=1
InnerProduct Conv_1860 1 1 3256 3258 0=192 1=1 2=2304 9=4
BinaryOp Mul_1862 2 1 3253_splitncnn_0 3258 3259 0=2
BinaryOp Add_1863 2 1 3259 3204_splitncnn_0 3260
Split splitncnn_94 1 2 3260 3260_splitncnn_0 3260_splitncnn_1
Padding Pad_1877 1 1 3260_splitncnn_1 3283 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1878 1 1 3283 3285 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1893 1 1 3285 3308 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1894 1 1 3308 3309 0=192 1=3 5=1 6=331776
Split splitncnn_95 1 2 3309 3309_splitncnn_0 3309_splitncnn_1
Pooling GlobalAveragePool_1895 1 1 3309_splitncnn_1 3310 0=1 4=1
InnerProduct Conv_1896 1 1 3310 3312 0=12 1=1 2=2304 9=1
InnerProduct Conv_1898 1 1 3312 3314 0=192 1=1 2=2304 9=4
BinaryOp Mul_1900 2 1 3309_splitncnn_0 3314 3315 0=2
BinaryOp Add_1901 2 1 3315 3260_splitncnn_0 3316
Padding Pad_1915 1 1 3316 3339 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1916 1 1 3339 3340 0=192 1=3 5=1 6=331776
BinaryOp Add_1917 2 1 3340 2644_splitncnn_0 3341
Split splitncnn_96 1 3 3341 3341_splitncnn_0 3341_splitncnn_1 3341_splitncnn_2
Padding Pad_1931 1 1 3341_splitncnn_2 3364 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1932 1 1 3364 3366 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1947 1 1 3366 3389 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1948 1 1 3389 3390 0=192 1=3 5=1 6=331776
Split splitncnn_97 1 2 3390 3390_splitncnn_0 3390_splitncnn_1
Pooling GlobalAveragePool_1949 1 1 3390_splitncnn_1 3391 0=1 4=1
InnerProduct Conv_1950 1 1 3391 3393 0=12 1=1 2=2304 9=1
InnerProduct Conv_1952 1 1 3393 3395 0=192 1=1 2=2304 9=4
BinaryOp Mul_1954 2 1 3390_splitncnn_0 3395 3396 0=2
BinaryOp Add_1955 2 1 3396 3341_splitncnn_1 3397
Split splitncnn_98 1 2 3397 3397_splitncnn_0 3397_splitncnn_1
Padding Pad_1969 1 1 3397_splitncnn_1 3420 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1970 1 1 3420 3422 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_1985 1 1 3422 3445 0=1 1=1 2=1 3=1 4=2
Convolution Conv_1986 1 1 3445 3446 0=192 1=3 5=1 6=331776
Split splitncnn_99 1 2 3446 3446_splitncnn_0 3446_splitncnn_1
Pooling GlobalAveragePool_1987 1 1 3446_splitncnn_1 3447 0=1 4=1
InnerProduct Conv_1988 1 1 3447 3449 0=12 1=1 2=2304 9=1
InnerProduct Conv_1990 1 1 3449 3451 0=192 1=1 2=2304 9=4
BinaryOp Mul_1992 2 1 3446_splitncnn_0 3451 3452 0=2
BinaryOp Add_1993 2 1 3452 3397_splitncnn_0 3453
Split splitncnn_100 1 2 3453 3453_splitncnn_0 3453_splitncnn_1
Padding Pad_2007 1 1 3453_splitncnn_1 3476 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2008 1 1 3476 3478 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2023 1 1 3478 3501 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2024 1 1 3501 3502 0=192 1=3 5=1 6=331776
Split splitncnn_101 1 2 3502 3502_splitncnn_0 3502_splitncnn_1
Pooling GlobalAveragePool_2025 1 1 3502_splitncnn_1 3503 0=1 4=1
InnerProduct Conv_2026 1 1 3503 3505 0=12 1=1 2=2304 9=1
InnerProduct Conv_2028 1 1 3505 3507 0=192 1=1 2=2304 9=4
BinaryOp Mul_2030 2 1 3502_splitncnn_0 3507 3508 0=2
BinaryOp Add_2031 2 1 3508 3453_splitncnn_0 3509
Split splitncnn_102 1 2 3509 3509_splitncnn_0 3509_splitncnn_1
Padding Pad_2045 1 1 3509_splitncnn_1 3532 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2046 1 1 3532 3534 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2061 1 1 3534 3557 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2062 1 1 3557 3558 0=192 1=3 5=1 6=331776
Split splitncnn_103 1 2 3558 3558_splitncnn_0 3558_splitncnn_1
Pooling GlobalAveragePool_2063 1 1 3558_splitncnn_1 3559 0=1 4=1
InnerProduct Conv_2064 1 1 3559 3561 0=12 1=1 2=2304 9=1
InnerProduct Conv_2066 1 1 3561 3563 0=192 1=1 2=2304 9=4
BinaryOp Mul_2068 2 1 3558_splitncnn_0 3563 3564 0=2
BinaryOp Add_2069 2 1 3564 3509_splitncnn_0 3565
Split splitncnn_104 1 2 3565 3565_splitncnn_0 3565_splitncnn_1
Padding Pad_2083 1 1 3565_splitncnn_1 3588 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2084 1 1 3588 3590 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2099 1 1 3590 3613 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2100 1 1 3613 3614 0=192 1=3 5=1 6=331776
Split splitncnn_105 1 2 3614 3614_splitncnn_0 3614_splitncnn_1
Pooling GlobalAveragePool_2101 1 1 3614_splitncnn_1 3615 0=1 4=1
InnerProduct Conv_2102 1 1 3615 3617 0=12 1=1 2=2304 9=1
InnerProduct Conv_2104 1 1 3617 3619 0=192 1=1 2=2304 9=4
BinaryOp Mul_2106 2 1 3614_splitncnn_0 3619 3620 0=2
BinaryOp Add_2107 2 1 3620 3565_splitncnn_0 3621
Split splitncnn_106 1 2 3621 3621_splitncnn_0 3621_splitncnn_1
Padding Pad_2121 1 1 3621_splitncnn_1 3644 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2122 1 1 3644 3646 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2137 1 1 3646 3669 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2138 1 1 3669 3670 0=192 1=3 5=1 6=331776
Split splitncnn_107 1 2 3670 3670_splitncnn_0 3670_splitncnn_1
Pooling GlobalAveragePool_2139 1 1 3670_splitncnn_1 3671 0=1 4=1
InnerProduct Conv_2140 1 1 3671 3673 0=12 1=1 2=2304 9=1
InnerProduct Conv_2142 1 1 3673 3675 0=192 1=1 2=2304 9=4
BinaryOp Mul_2144 2 1 3670_splitncnn_0 3675 3676 0=2
BinaryOp Add_2145 2 1 3676 3621_splitncnn_0 3677
Split splitncnn_108 1 2 3677 3677_splitncnn_0 3677_splitncnn_1
Padding Pad_2159 1 1 3677_splitncnn_1 3700 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2160 1 1 3700 3702 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2175 1 1 3702 3725 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2176 1 1 3725 3726 0=192 1=3 5=1 6=331776
Split splitncnn_109 1 2 3726 3726_splitncnn_0 3726_splitncnn_1
Pooling GlobalAveragePool_2177 1 1 3726_splitncnn_1 3727 0=1 4=1
InnerProduct Conv_2178 1 1 3727 3729 0=12 1=1 2=2304 9=1
InnerProduct Conv_2180 1 1 3729 3731 0=192 1=1 2=2304 9=4
BinaryOp Mul_2182 2 1 3726_splitncnn_0 3731 3732 0=2
BinaryOp Add_2183 2 1 3732 3677_splitncnn_0 3733
Split splitncnn_110 1 2 3733 3733_splitncnn_0 3733_splitncnn_1
Padding Pad_2197 1 1 3733_splitncnn_1 3756 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2198 1 1 3756 3758 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2213 1 1 3758 3781 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2214 1 1 3781 3782 0=192 1=3 5=1 6=331776
Split splitncnn_111 1 2 3782 3782_splitncnn_0 3782_splitncnn_1
Pooling GlobalAveragePool_2215 1 1 3782_splitncnn_1 3783 0=1 4=1
InnerProduct Conv_2216 1 1 3783 3785 0=12 1=1 2=2304 9=1
InnerProduct Conv_2218 1 1 3785 3787 0=192 1=1 2=2304 9=4
BinaryOp Mul_2220 2 1 3782_splitncnn_0 3787 3788 0=2
BinaryOp Add_2221 2 1 3788 3733_splitncnn_0 3789
Split splitncnn_112 1 2 3789 3789_splitncnn_0 3789_splitncnn_1
Padding Pad_2235 1 1 3789_splitncnn_1 3812 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2236 1 1 3812 3814 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2251 1 1 3814 3837 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2252 1 1 3837 3838 0=192 1=3 5=1 6=331776
Split splitncnn_113 1 2 3838 3838_splitncnn_0 3838_splitncnn_1
Pooling GlobalAveragePool_2253 1 1 3838_splitncnn_1 3839 0=1 4=1
InnerProduct Conv_2254 1 1 3839 3841 0=12 1=1 2=2304 9=1
InnerProduct Conv_2256 1 1 3841 3843 0=192 1=1 2=2304 9=4
BinaryOp Mul_2258 2 1 3838_splitncnn_0 3843 3844 0=2
BinaryOp Add_2259 2 1 3844 3789_splitncnn_0 3845
Split splitncnn_114 1 2 3845 3845_splitncnn_0 3845_splitncnn_1
Padding Pad_2273 1 1 3845_splitncnn_1 3868 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2274 1 1 3868 3870 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2289 1 1 3870 3893 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2290 1 1 3893 3894 0=192 1=3 5=1 6=331776
Split splitncnn_115 1 2 3894 3894_splitncnn_0 3894_splitncnn_1
Pooling GlobalAveragePool_2291 1 1 3894_splitncnn_1 3895 0=1 4=1
InnerProduct Conv_2292 1 1 3895 3897 0=12 1=1 2=2304 9=1
InnerProduct Conv_2294 1 1 3897 3899 0=192 1=1 2=2304 9=4
BinaryOp Mul_2296 2 1 3894_splitncnn_0 3899 3900 0=2
BinaryOp Add_2297 2 1 3900 3845_splitncnn_0 3901
Split splitncnn_116 1 2 3901 3901_splitncnn_0 3901_splitncnn_1
Padding Pad_2311 1 1 3901_splitncnn_1 3924 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2312 1 1 3924 3926 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2327 1 1 3926 3949 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2328 1 1 3949 3950 0=192 1=3 5=1 6=331776
Split splitncnn_117 1 2 3950 3950_splitncnn_0 3950_splitncnn_1
Pooling GlobalAveragePool_2329 1 1 3950_splitncnn_1 3951 0=1 4=1
InnerProduct Conv_2330 1 1 3951 3953 0=12 1=1 2=2304 9=1
InnerProduct Conv_2332 1 1 3953 3955 0=192 1=1 2=2304 9=4
BinaryOp Mul_2334 2 1 3950_splitncnn_0 3955 3956 0=2
BinaryOp Add_2335 2 1 3956 3901_splitncnn_0 3957
Split splitncnn_118 1 2 3957 3957_splitncnn_0 3957_splitncnn_1
Padding Pad_2349 1 1 3957_splitncnn_1 3980 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2350 1 1 3980 3982 0=192 1=3 5=1 6=331776 9=2 -23310=1,2.000000e-01
Padding Pad_2365 1 1 3982 4005 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2366 1 1 4005 4006 0=192 1=3 5=1 6=331776
Split splitncnn_119 1 2 4006 4006_splitncnn_0 4006_splitncnn_1
Pooling GlobalAveragePool_2367 1 1 4006_splitncnn_1 4007 0=1 4=1
InnerProduct Conv_2368 1 1 4007 4009 0=12 1=1 2=2304 9=1
InnerProduct Conv_2370 1 1 4009 4011 0=192 1=1 2=2304 9=4
BinaryOp Mul_2372 2 1 4006_splitncnn_0 4011 4012 0=2
BinaryOp Add_2373 2 1 4012 3957_splitncnn_0 4013
Padding Pad_2387 1 1 4013 4036 0=1 1=1 2=1 3=1 4=2
Convolution Conv_2388 1 1 4036 4037 0=192 1=3 5=1 6=331776
BinaryOp Add_2389 2 1 4037 3341_splitncnn_0 4038
BinaryOp Add_2390 2 1 4038 553_splitncnn_0 4039
Convolution Conv_2391 1 1 4039 4040 0=192 1=3 4=1 5=1 6=331776
PixelShuffle Reshape_2409 1 1 4040 4070 0=8

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2020 nihui
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2020 nihui
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

129
Pkgs/licenses/ffmpeg.md Normal file
View File

@ -0,0 +1,129 @@
# License
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file `COPYING.LGPLv2.1` for details. Some other
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
FFmpeg.
Some optional parts of FFmpeg are licensed under the GNU General Public License
version 2 or later (GPL v2+). See the file `COPYING.GPLv2` for details. None of
these parts are used by default, you have to explicitly pass `--enable-gpl` to
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are:
- libpostproc
- optional x86 optimization in the files
- `libavcodec/x86/flac_dsp_gpl.asm`
- `libavcodec/x86/idct_mmx.c`
- `libavfilter/x86/vf_removegrain.asm`
- the following building and testing tools
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/tests/swresample.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the following filters in libavfilter:
- `signature_lookup.c`
- `vf_blackframe.c`
- `vf_boxblur.c`
- `vf_colormatrix.c`
- `vf_cover_rect.c`
- `vf_cropdetect.c`
- `vf_delogo.c`
- `vf_eq.c`
- `vf_find_rect.c`
- `vf_fspp.c`
- `vf_histeq.c`
- `vf_hqdn3d.c`
- `vf_kerndeint.c`
- `vf_lensfun.c` (GPL version 3 or later)
- `vf_mcdeint.c`
- `vf_mpdecimate.c`
- `vf_nnedi.c`
- `vf_owdenoise.c`
- `vf_perspective.c`
- `vf_phase.c`
- `vf_pp.c`
- `vf_pp7.c`
- `vf_pullup.c`
- `vf_repeatfields.c`
- `vf_sab.c`
- `vf_signature.c`
- `vf_smartblur.c`
- `vf_spp.c`
- `vf_stereo3d.c`
- `vf_super2xsai.c`
- `vf_tinterlace.c`
- `vf_uspp.c`
- `vf_vaguedenoiser.c`
- `vsrc_mptestsrc.c`
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter `--enable-version3` will activate this licensing option
for you. Read the file `COPYING.LGPLv3` or, if you have enabled GPL parts,
`COPYING.GPLv3` to learn the exact legal terms that apply in this case.
There are a handful of files under other licensing terms, namely:
* The files `libavcodec/jfdctfst.c`, `libavcodec/jfdctint_template.c` and
`libavcodec/jrevdct.c` are taken from libjpeg, see the top of the files for
licensing details. Specifically note that you must credit the IJG in the
documentation accompanying your program if you only distribute executables.
You must also indicate any changes including additions and deletions to
those three files in the documentation.
* `tests/reference.pnm` is under the expat license.
## External libraries
FFmpeg can be combined with a number of external libraries, which sometimes
affect the licensing of binaries resulting from the combination.
### Compatible libraries
The following libraries are under GPL version 2:
- avisynth
- frei0r
- libcdio
- libdavs2
- librubberband
- libvidstab
- libx264
- libx265
- libxavs
- libxavs2
- libxvid
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.
The following libraries are under LGPL version 3:
- gmp
- libaribb24
- liblensfun
When combining them with FFmpeg, use the configure option `--enable-version3` to
upgrade FFmpeg to the LGPL v3.
The VMAF, mbedTLS, RK MPI, OpenCORE and VisualOn libraries are under the Apache License
2.0. That license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.
The smbclient library is under the GPL v3, to combine it with FFmpeg,
the options `--enable-gpl` and `--enable-version3` have to be passed to
configure to upgrade FFmpeg to the GPL v3.
### Incompatible libraries
There are certain libraries you can combine with FFmpeg whose licenses are not
compatible with the GPL and/or the LGPL. If you wish to enable these
libraries, even in circumstances that their license may be incompatible, pass
`--enable-nonfree` to configure. This will cause the resulting binary to be
unredistributable.
The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2020 nihui
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

21
Pkgs/licenses/rife.txt Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 hzwer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,190 @@
# RIFE frame-interpolation driver (NMKD/Flowframes variant).
# Reads frames either from a video file (--video, via scikit-video) or from a
# numerically named PNG sequence directory (--img) and produces 2**exp
# interpolated output frames per input frame.
# NOTE(review): indentation was reconstructed from syntax (the diff view
# stripped it); top-level vs. nested placement of a few statements below
# should be confirmed against the original file.
import sys
import os
import cv2
import torch
import argparse
import numpy as np
#from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
import skvideo.io
from queue import Queue, Empty
#import moviepy.editor
import shutil
warnings.filterwarnings("ignore")
# Run relative to the script location so the bundled model files resolve.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
print("Changing working dir to {0}".format(dname))
# NOTE(review): this chdir targets the PARENT of dname (dirname applied twice),
# while the message above claims dname itself — confirm which is intended.
os.chdir(os.path.dirname(dname))
print("Added {0} to PATH".format(dname))
sys.path.append(dname)
# Inference only: prefer CUDA, disable autograd, let cuDNN autotune kernels.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.set_grad_enabled(False)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
else:
    print("WARNING: CUDA is not available, RIFE is running on CPU! [ff:nocuda-cpu]")
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--video', dest='video', type=str, default=None)
parser.add_argument('--img', dest='img', type=str, default=None)
parser.add_argument('--output', required=False, default='frames-interpolated')
parser.add_argument('--imgformat', default="png")
parser.add_argument('--montage', default=False, dest='montage', action='store_true', help='montage origin video')
parser.add_argument('--skip', dest='skip', default=False, action='store_true', help='whether to remove static frames before processing')
parser.add_argument('--fps', dest='fps', type=int, default=None)
# NOTE(review): default=True combined with action='store_true' means args.png
# can never become False, so the cv2.VideoWriter branch below is dead code.
parser.add_argument('--png', dest='png', default=True, action='store_true', help='whether to vid_out png format vid_outs')
parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='vid_out video extension')
# exp: interpolation factor exponent; output has 2**exp times the frames.
parser.add_argument('--exp', dest='exp', type=int, default=1)
args = parser.parse_args()
# At least one input source is required.
assert (not args.video is None or not args.img is None)
if not args.img is None:
    args.png = True
# Load the HD RIFE model from <script dir>/models (-1 selects the bundled checkpoint).
from model.RIFE_HD import Model
model = Model()
model.load_model(os.path.join(dname, "models"), -1)
model.eval()
model.device()
# Derive the output directory by replacing the trailing input folder name
# with --output.
# NOTE(review): path comes from args.img even when only --video was given,
# in which case path is None and os.path.basename(None) raises — confirm
# the caller always passes --img, or that this belongs under the if above.
path = args.img
name = os.path.basename(path)
print('name: ' + name)
interp_output_path = (args.output).join(path.rsplit(name, 1))
print('interp_output_path: ' + interp_output_path)
if not args.video is None:
    # Video mode: probe fps/frame count, then stream frames with skvideo.
    videoCapture = cv2.VideoCapture(args.video)
    fps = videoCapture.get(cv2.CAP_PROP_FPS)
    tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
    videoCapture.release()
    if args.fps is None:
        fpsNotAssigned = True
        args.fps = fps * (2 ** args.exp)
    else:
        fpsNotAssigned = False
    videogen = skvideo.io.vreader(args.video)
    lastframe = next(videogen)
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_path_wo_ext, ext = os.path.splitext(args.video)
    print('{} frames in total'.format(tot_frame))
else:
    # Image-sequence mode: collect PNG filenames, sorted by their numeric stem.
    videogen = []
    for f in os.listdir(args.img):
        if 'png' in f:
            videogen.append(f)
    tot_frame = len(videogen)
    videogen.sort(key= lambda x:int(x[:-4]))
    # First frame loaded eagerly (cv2 gives BGR; flip to RGB); the rest
    # stream through the reader thread.
    lastframe = cv2.imread(os.path.join(args.img, videogen[0]))[:, :, ::-1].copy()
    videogen = videogen[1:]
h, w, _ = lastframe.shape
vid_out = None
if args.png:
    if not os.path.exists(interp_output_path):
        os.mkdir(interp_output_path)
else:
    # Dead in practice (args.png is always True — see the --png note above).
    vid_out = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h))
def clear_write_buffer(user_args, write_buffer):
    """Consumer thread: drain frames from write_buffer until a None sentinel.

    In PNG mode each frame is written as a zero-padded, 1-based numbered PNG
    into the module-level interp_output_path; otherwise frames are appended to
    the module-level cv2 VideoWriter vid_out. Incoming frames are RGB and are
    flipped to BGR for OpenCV.
    """
    frame_number = 1
    while True:
        frame = write_buffer.get()
        if frame is None:
            return
        bgr = frame[:, :, ::-1]
        if not user_args.png:
            vid_out.write(bgr)
            continue
        print('=> {:0>8d}.png'.format(frame_number))
        cv2.imwrite('{}/{:0>8d}.png'.format(interp_output_path, frame_number), bgr)
        frame_number += 1
def build_read_buffer(user_args, read_buffer, videogen):
    """Producer thread: feed frames into read_buffer, ending with a None sentinel.

    In image-sequence mode (user_args.img set) each item of videogen is a
    filename that is loaded with cv2 and converted BGR->RGB; otherwise the
    items are already decoded frames. In montage mode the module-level
    crop window (left, w) is applied before queueing.
    """
    for item in videogen:
        if user_args.img is not None:
            item = cv2.imread(os.path.join(user_args.img, item))[:, :, ::-1].copy()
        if user_args.montage:
            item = item[:, left: left + w]
        read_buffer.put(item)
    # Signal end-of-stream to the consumer.
    read_buffer.put(None)
def make_inference(I0, I1, exp):
    """Recursively synthesise the 2**exp - 1 intermediate frames between I0 and I1.

    Uses the module-level RIFE `model` to infer the midpoint frame, then
    recurses on each half pair; results come back in temporal order.
    """
    global model
    midpoint = model.inference(I0, I1)
    if exp == 1:
        return [midpoint]
    before = make_inference(I0, midpoint, exp=exp - 1)
    after = make_inference(midpoint, I1, exp=exp - 1)
    return before + [midpoint] + after
# --- main interpolation loop ---
if args.montage:
    # Montage mode: operate on the centre half of each frame only.
    left = w // 4
    w = w // 2
# Pad spatial dims up to the next multiple of 32 (network requirement).
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
#pbar = tqdm(total=tot_frame)
skip_frame = 1
if args.montage:
    lastframe = lastframe[:, left: left + w]
# Decouple disk I/O from inference via bounded producer/consumer queues.
write_buffer = Queue(maxsize=500)
read_buffer = Queue(maxsize=500)
_thread.start_new_thread(build_read_buffer, (args, read_buffer, videogen))
_thread.start_new_thread(clear_write_buffer, (args, write_buffer))
# HWC uint8 [0,255] -> 1xCxHxW float [0,1] on the target device.
I1 = torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
I1 = F.pad(I1, padding)
while True:
    frame = read_buffer.get()
    if frame is None:
        # Reader thread signalled end of input.
        break
    # Slide the pair window: previous I1 becomes I0.
    I0 = I1
    I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
    I1 = F.pad(I1, padding)
    # Static-frame detection / duplicate skipping, disabled in this build:
    #p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
    # - F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs().mean()
    #if p < 5e-3 and args.skip:
    # if skip_frame % 100 == 0:
    # print("Warning: Your video has {} static frames, skipping them may change the duration of the generated video.".format(skip_frame))
    # skip_frame += 1
    # #pbar.update(1)
    # continue
    #if p > 0.2:
    # mid1 = lastframe
    # mid0 = lastframe
    # mid2 = lastframe
    #else:
    output = make_inference(I0, I1, args.exp)
    if args.montage:
        # Emit source + interpolated crops side by side.
        write_buffer.put(np.concatenate((lastframe, lastframe), 1))
        for mid in output:
            # 1xCxHxW float [0,1] -> HWC uint8; crop padding back to (h, w).
            mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
            write_buffer.put(np.concatenate((lastframe, mid[:h, :w]), 1))
    else:
        # Emit the source frame followed by its interpolated successors.
        write_buffer.put(lastframe)
        for mid in output:
            mid = (((mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)))
            write_buffer.put(mid[:h, :w])
    #pbar.update(1)
    lastframe = frame
# Flush the final source frame.
if args.montage:
    write_buffer.put(np.concatenate((lastframe, lastframe), 1))
else:
    write_buffer.put(lastframe)
import time
# Poll until the writer thread has drained the queue before exiting.
while(not write_buffer.empty()):
    time.sleep(0.1)
#pbar.close()
if not vid_out is None:
    vid_out.release()
# move audio to new video file if appropriate
if args.png == False and fpsNotAssigned == True and not args.skip:
    # NOTE(review): transferAudio is not defined or imported anywhere in this
    # file, so this branch would raise NameError. It appears unreachable in
    # practice because args.png defaults to True — confirm before relying on
    # direct video output mode.
    outputVideoFileName = '{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext)
    transferAudio(video_path_wo_ext + "." + args.ext, outputVideoFileName)

View File

@ -1,125 +0,0 @@
import sys
import cv2
import os
import numpy as np
import shutil
import argparse
import torch
import torchvision
from torchvision import transforms
from torch.nn import functional as F
from PIL import Image
# Run from the parent of this script's directory and make the local
# "model" package importable.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
print("Changing working dir to {0}".format(dname))
os.chdir(os.path.dirname(dname))
print("Added {0} to PATH".format(dname))
sys.path.append(dname)
from model.RIFE import Model
from glob import glob
from imageio import imread, imsave
from torch.autograd import Variable
# Select compute device; disable autograd and enable cuDNN autotuning on GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.set_grad_enabled(False)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
else:
    print("WARNING: CUDA is not available, RIFE is running on CPU! [ff:nocuda-cpu]")
# Load RIFE weights from the "models" folder next to this script.
RIFE_model = Model()
RIFE_model.load_model(os.path.join(dname, "models"))
RIFE_model.eval()
RIFE_model.device()
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--output', required=False, default='frames-interpolated')
parser.add_argument('--times', default=2, type=int)
parser.add_argument('--imgformat', default="png")
args = parser.parse_args()
path = args.input
name = os.path.basename(path)
# NOTE(review): the frame count always comes from *.png even though the
# output format is configurable via --imgformat; input is assumed PNG.
length = len(glob(path + '/*.png'))
#interp_output_path = path.replace(name, name+'-interpolated')
# Output directory: the input path with its last component replaced by args.output.
interp_output_path = (args.output).join(path.rsplit(name, 1))
os.makedirs(interp_output_path, exist_ok = True)
#output_path = path.replace('tmp', 'output')
try:
    print("In Path: {0}".format(path))
    print("Out Path: {0}".format(interp_output_path))
except:
    print("Failed to print in/out paths. This might not be a problem, but it shouldn't happen either.")
#if os.path.isfile(output_path):
#    exit
ext = args.imgformat
with torch.no_grad():
    # if not os.path.isfile('{:s}/00000001.png'.format(interp_output_path)):
    output_frame_number = 1
    # shutil.copyfile('{:s}/{:08d}.png'.format(path, output_frame_number), '{:s}/00000001.png'.format(interp_output_path)) # Copy first frame
    cv2.imwrite('{:s}/00000001.{}'.format(interp_output_path, ext), cv2.imread('{:s}/{:08d}.png'.format(path, output_frame_number), 1)) # Write first frame
    output_frame_number += 1
    # Interpolate between each consecutive pair of input frames.
    for input_frame_number in range(1, length):
        print("Interpolating frame {0} of {1}...".format(input_frame_number, length))
        frame_0_path = '{:s}/{:08d}.png'.format(path, input_frame_number)
        frame_1_path = '{:s}/{:08d}.png'.format(path, input_frame_number + 1)
        frame0 = cv2.imread(frame_0_path)
        frame1 = cv2.imread(frame_1_path)
        # HWC uint8 -> 1xCxHxW float in [0, 1].
        img0 = (torch.tensor(frame0.transpose(2, 0, 1)).to(device, non_blocking=True) / 255.).unsqueeze(0)
        img1 = (torch.tensor(frame1.transpose(2, 0, 1)).to(device, non_blocking=True) / 255.).unsqueeze(0)
        n, c, h, w = img0.shape
        # Pad spatial dims up to the next multiple of 32 (network requirement).
        ph = ((h - 1) // 32 + 1) * 32
        pw = ((w - 1) // 32 + 1) * 32
        padding = (0, pw - w, 0, ph - h)
        img0 = F.pad(img0, padding)
        img1 = F.pad(img1, padding)
        img_list = [img0, img1]
        # Each pass doubles the frame count: 2**times - 1 new frames per pair.
        for i in range(args.times):
            tmp = []
            for j in range(len(img_list) - 1):
                mid = RIFE_model.inference(img_list[j], img_list[j + 1])
                tmp.append(img_list[j])
                tmp.append(mid)
            tmp.append(img1)
            img_list = tmp
        #print("Out Frame Num: {0}".format(output_frame_number))
        # Write every frame except the first (it was written as the
        # previous pair's last frame), cropped back to the original size.
        for i in range(len(img_list)):
            if i == 0:
                continue
            cv2.imwrite('{:s}/{:08d}.{}'.format(interp_output_path, output_frame_number, ext), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
            #print("Writing image from array")
            #print("Out Frame Num: {0}".format(output_frame_number))
            output_frame_number += 1
        print("Written output frame {0}.".format(output_frame_number))
    # NOTE(review): the pair loop above already wrote the final source frame,
    # so this tail writes it a second time at the next index — confirm the
    # duplicate final frame is intended.
    input_frame_number += 1;
    print("Copying frame {0} of {1}...".format(input_frame_number, length))
    print("Copying in/{0} to out/{1}".format(input_frame_number, output_frame_number))
    # shutil.copyfile('{:s}/{:08d}.png'.format(path, input_frame_number), '{:s}/{:08d}.png'.format(interp_output_path, output_frame_number)) # Copy last frame
    cv2.imwrite('{:s}/{:08d}.{}'.format(interp_output_path, output_frame_number, ext), cv2.imread('{:s}/{:08d}.png'.format(path, input_frame_number), 1)) # Write last frame
print("Done!")

View File

@ -1,158 +0,0 @@
import sys
import os
import cv2
import torch
import argparse
import numpy as np
#from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
#import skvideo.io
from queue import Queue
# Run from the parent of this script's directory and make the local
# "model" package importable.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
print("Changing working dir to {0}".format(dname))
os.chdir(os.path.dirname(dname))
print("Added {0} to PATH".format(dname))
sys.path.append(dname)
warnings.filterwarnings("ignore")
# Select compute device; disable autograd and enable cuDNN autotuning on GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.set_grad_enabled(False)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
else:
    print("WARNING: CUDA is not available, RIFE is running on CPU! [ff:nocuda-cpu]")
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--input', required=True)
parser.add_argument('--output', required=False, default='frames-interpolated')
parser.add_argument('--imgformat', default="png")
parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
#parser.add_argument('--scn', dest='scn', default=False, help='enable scene detection')
#parser.add_argument('--fps', dest='fps', type=int, default=None)
parser.add_argument('--png', dest='png', default=True, help='whether to output png format outputs')
#parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
parser.add_argument('--times', dest='times', type=int, default=1, help='interpolation exponent (default: 1)')
args = parser.parse_args()
assert (args.times in [1, 2, 3])
# Output-frame multiplier derived from the interpolation exponent.
args.exptimes = 2 ** args.times
from model.RIFE import Model
model = Model()
model.load_model(os.path.join(dname, "models"))
model.eval()
model.device()
# Read the zero-padded numbered PNG sequence as a video stream.
videoCapture = cv2.VideoCapture("{}/%08d.png".format(args.input),cv2.CAP_IMAGES)
#fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS))
#videogen = skvideo.io.vreader(args.video)
success, frame = videoCapture.read()
h, w, _ = frame.shape
path = args.input
name = os.path.basename(path)
print('name: ' + name)
# Output directory: the input path with its last component replaced by args.output.
interp_output_path = (args.output).join(path.rsplit(name, 1))
print('interp_output_path: ' + interp_output_path)
#if args.fps is None:
#    args.fps = fps * args.exptimes
#fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
#video_path_wo_ext, ext = os.path.splitext(args.video)
if args.png:
    if not os.path.exists(interp_output_path):
        os.mkdir(interp_output_path)
    # PNG mode never creates a video writer.
    vid_out = None
#else:
#    vid_out = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exptimes, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h))
# Shared state for the writer thread: output frame counter, static-frame
# counter (used by the disabled skip logic), and the frame queue.
cnt = 0
skip_frame = 1
buffer = Queue()
def write_frame(i0, infs, i1, p, user_args):
    """Queue a batch of source frames plus their interpolated frames.

    i0:        numpy batch of source frames, shape (batch, H, W, C).
    infs:      list of interpolated-frame batches, each shaped like i0.
    i1:        batch of the following source frames (unused; kept for
               interface compatibility with the disabled logic below).
    p:         per-pair difference metric (unused while the skip/transition
               detection is commented out).
    user_args: parsed CLI args (unused for the same reason).

    Fix: removed the unused local ``l = len(infs)``.
    """
    global skip_frame, cnt
    for i in range(i0.shape[0]):
        # A video transition occurs.
        #if p[i] > 0.2:
        #    print('Transition! Duplicting frame instead of interpolating.')
        #    for j in range(len(infs)):
        #        infs[j][i] = i0[i]
        # Result was too similar to previous frame, skip if given.
        #if p[i] < 5e-3 and user_args.skip:
        #    if skip_frame % 100 == 0:
        #        print("Warning: Your video has {} static frames, "
        #              "skipping them may change the duration of the generated video.".format(skip_frame))
        #    skip_frame += 1
        #    continue
        # Write results: the source frame, then every interpolated frame
        # that belongs between it and the next source frame.
        buffer.put(i0[i])
        for inf in infs:
            buffer.put(inf[i])
def clear_buffer(user_args):
    # Consumer thread: drains the global frame queue, writing each frame to
    # disk (PNG mode) or to the video writer. A None item is the
    # end-of-stream sentinel.
    global cnt
    while True:
        item = buffer.get()
        if item is None:
            break
        if user_args.png:
            print('=> {:0>8d}.png'.format(cnt))
            # NOTE(review): [:, :, ::1] is a no-op slice (cv2 frames are
            # already BGR, which imwrite expects) — presumably a leftover
            # from a [:, :, ::-1] RGB conversion; confirm.
            cv2.imwrite('{}/{:0>8d}.png'.format(interp_output_path, cnt), item[:, :, ::1])
            cnt += 1
        else:
            # Only reachable if a cv2.VideoWriter was created (that code
            # path is currently commented out above); vid_out is None in
            # PNG mode.
            vid_out.write(item[:, :, ::-1])
def make_inference(model, I0, I1, exp):
    """Recursively interpolate 2**exp - 1 frames between I0 and I1.

    Returns the in-between frames in temporal order (endpoints excluded).
    """
    mid = model.inference(I0, I1)
    if exp == 1:
        return [mid]
    # Recurse on both halves around the midpoint frame.
    left = make_inference(model, I0, mid, exp=exp - 1)
    right = make_inference(model, mid, I1, exp=exp - 1)
    return left + [mid] + right
# Pad spatial dims up to the next multiple of 32 (network requirement).
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
print('{} frames in total'.format(tot_frame))
#pbar = tqdm(total=tot_frame)
img_list = []
# Start the writer thread that drains the global frame queue.
_thread.start_new_thread(clear_buffer, (args, ))
while success:
    success, frame = videoCapture.read()
    if success:
        img_list.append(frame)
    # Process in sliding windows of 5 frames (4 pairs at a time), or
    # whatever remains at end of stream.
    if len(img_list) == 5 or (not success and len(img_list) > 1):
        # Stack to NCHW float in [0, 1]; consecutive pairs are (I0, I1).
        imgs = torch.from_numpy(np.transpose(img_list, (0, 3, 1, 2))).to(device, non_blocking=True).float() / 255.
        I0 = imgs[:-1]
        I1 = imgs[1:]
        # Per-pair difference metric on 16x16 thumbnails (consumed only by
        # the disabled skip/transition logic in write_frame).
        p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
             - F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs()
        I0 = F.pad(I0, padding)
        I1 = F.pad(I1, padding)
        inferences = make_inference(model, I0, I1, exp=args.times)
        # Back to uint8 HWC numpy, cropped to the original frame size.
        I0 = np.array(img_list[:-1])
        I1 = np.array(img_list[1:])
        inferences = list(map(lambda x: ((x[:, :, :h, :w] * 255.).byte().cpu().detach().numpy().transpose(0, 2, 3, 1)), inferences))
        write_frame(I0, inferences, I1, p.mean(3).mean(2).mean(1), args)
        #pbar.update(4)
        # Keep the last frame as the start of the next window.
        img_list = img_list[-1:]
# Flush the final source frame, then wait for the writer to drain the queue.
# NOTE(review): no None sentinel is queued, so clear_buffer never exits on
# its own; the script relies on interpreter shutdown to stop it — confirm.
buffer.put(img_list[0])
import time
while(not buffer.empty()):
    time.sleep(0.1)
time.sleep(0.5)
#pbar.close()
#if not vid_out is None:
#    vid_out.release()

View File

@ -67,7 +67,7 @@ class IFBlock(nn.Module):
def forward(self, x):
if self.scale != 1:
x = F.interpolate(x, scale_factor=1. / self.scale, mode="bilinear",
align_corners=False, recompute_scale_factor=False)
align_corners=False)
x = self.conv0(x)
x = self.res0(x)
x = self.res1(x)
@ -79,7 +79,7 @@ class IFBlock(nn.Module):
flow = self.up(x)
if self.scale != 1:
flow = F.interpolate(flow, scale_factor=self.scale, mode="bilinear",
align_corners=False, recompute_scale_factor=False)
align_corners=False)
return flow
@ -92,7 +92,7 @@ class IFNet(nn.Module):
def forward(self, x):
x = F.interpolate(x, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False)
align_corners=False)
flow0 = self.block0(x)
F1 = flow0
warped_img0 = warp(x[:, :3], F1)

View File

@ -0,0 +1,115 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv_wo_act(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (no bias) followed by BatchNorm2d, without an activation."""
    convolution = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
    return nn.Sequential(convolution, nn.BatchNorm2d(out_planes))
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (no bias) -> BatchNorm2d -> PReLU building block."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, dilation=dilation,
                  bias=False),
        nn.BatchNorm2d(out_planes),
        nn.PReLU(out_planes),
    ]
    return nn.Sequential(*layers)
class ResBlock(nn.Module):
    """Residual block with a squeeze-and-excitation style channel gate."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(ResBlock, self).__init__()
        # Identity shortcut when the shape is preserved; otherwise a 3x3
        # strided projection.
        if in_planes == out_planes and stride == 1:
            self.conv0 = nn.Identity()
        else:
            self.conv0 = nn.Conv2d(in_planes, out_planes,
                                   3, stride, 1, bias=False)
        self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
        self.conv2 = conv_wo_act(out_planes, out_planes, 3, 1, 1)
        self.relu1 = nn.PReLU(1)
        self.relu2 = nn.PReLU(out_planes)
        self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
        self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        shortcut = self.conv0(x)
        feat = self.conv2(self.conv1(x))
        # Channel attention computed from the global spatial mean.
        gate = feat.mean(3, True).mean(2, True)
        gate = torch.sigmoid(self.fc2(self.relu1(self.fc1(gate))))
        return self.relu2(feat * gate + shortcut)
class IFBlock(nn.Module):
    """Flow-estimation block: conv stem, six residual blocks, 2-channel
    flow head. `scale` controls the working resolution."""
    def __init__(self, in_planes, scale=1, c=64):
        super(IFBlock, self).__init__()
        self.scale = scale
        self.conv0 = conv(in_planes, c, 3, 1, 1)
        self.res0 = ResBlock(c, c)
        self.res1 = ResBlock(c, c)
        self.res2 = ResBlock(c, c)
        self.res3 = ResBlock(c, c)
        self.res4 = ResBlock(c, c)
        self.res5 = ResBlock(c, c)
        self.conv1 = nn.Conv2d(c, 2, 3, 1, 1)
        self.up = nn.PixelShuffle(2)
    def forward(self, x):
        # Work at a reduced resolution when scale != 1.
        if self.scale != 1:
            x = F.interpolate(x, scale_factor=1. / self.scale,
                              mode="bilinear", align_corners=False)
        x = self.conv0(x)
        for res in (self.res0, self.res1, self.res2,
                    self.res3, self.res4, self.res5):
            x = res(x)
        # Flow head; note self.up (PixelShuffle) is registered but unused
        # in this variant.
        flow = self.conv1(x)
        if self.scale != 1:
            flow = F.interpolate(flow, scale_factor=self.scale,
                                 mode="bilinear", align_corners=False)
        return flow
class IFNet(nn.Module):
    # Coarse-to-fine flow estimator: three IFBlocks at decreasing scales,
    # each refining the flow accumulated so far from flow-warped inputs.
    def __init__(self):
        super(IFNet, self).__init__()
        self.block0 = IFBlock(6, scale=4, c=192)
        self.block1 = IFBlock(8, scale=2, c=128)
        self.block2 = IFBlock(8, scale=1, c=64)
    def forward(self, x):
        # x: the two input images concatenated channel-wise (6 channels);
        # flow is estimated at half the input resolution.
        x = F.interpolate(x, scale_factor=0.5, mode="bilinear",
                          align_corners=False)
        flow0 = self.block0(x)
        F1 = flow0
        # Warp both images with opposite flows, then predict a residual
        # flow correction from the warped pair.
        warped_img0 = warp(x[:, :3], F1)
        warped_img1 = warp(x[:, 3:], -F1)
        flow1 = self.block1(torch.cat((warped_img0, warped_img1, F1), 1))
        F2 = (flow0 + flow1)
        warped_img0 = warp(x[:, :3], F2)
        warped_img1 = warp(x[:, 3:], -F2)
        flow2 = self.block2(torch.cat((warped_img0, warped_img1, F2), 1))
        F3 = (flow0 + flow1 + flow2)
        # Final flow plus every refinement stage (stages feed the losses).
        return F3, [F1, F2, F3]
if __name__ == '__main__':
    # Smoke test: run the flow network on a zero image vs. a random image
    # and print the resulting flow shape.
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    imgs = torch.cat((img0, img1), 1)
    flownet = IFNet()
    flow, _ = flownet(imgs)
    print(flow.shape)

View File

@ -0,0 +1,120 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv_wo_act(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (no bias) + BatchNorm2d, with no trailing activation."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, dilation=dilation,
                  bias=False),
        nn.BatchNorm2d(out_planes),
    ]
    return nn.Sequential(*layers)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (no bias) -> BatchNorm2d -> PReLU building block."""
    convolution = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
    return nn.Sequential(convolution, nn.BatchNorm2d(out_planes), nn.PReLU(out_planes))
class ResBlock(nn.Module):
    """Residual block (HD variant, 5x5 first conv) with a
    squeeze-and-excitation style channel gate."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(ResBlock, self).__init__()
        # Identity shortcut when the shape is preserved; otherwise a 3x3
        # strided projection.
        if in_planes == out_planes and stride == 1:
            self.conv0 = nn.Identity()
        else:
            self.conv0 = nn.Conv2d(in_planes, out_planes,
                                   3, stride, 1, bias=False)
        self.conv1 = conv(in_planes, out_planes, 5, stride, 2)
        self.conv2 = conv_wo_act(out_planes, out_planes, 3, 1, 1)
        self.relu1 = nn.PReLU(1)
        self.relu2 = nn.PReLU(out_planes)
        self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
        self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        shortcut = self.conv0(x)
        feat = self.conv2(self.conv1(x))
        # Channel attention computed from the global spatial mean.
        gate = feat.mean(3, True).mean(2, True)
        gate = torch.sigmoid(self.fc2(self.relu1(self.fc1(gate))))
        return self.relu2(feat * gate + shortcut)
class IFBlock(nn.Module):
    """Flow-estimation block (HD variant): a stride-2 stem, six residual
    blocks, and a PixelShuffle head that restores the stem's resolution."""
    def __init__(self, in_planes, scale=1, c=64):
        super(IFBlock, self).__init__()
        self.scale = scale
        self.conv0 = conv(in_planes, c, 5, 2, 2)
        self.res0 = ResBlock(c, c)
        self.res1 = ResBlock(c, c)
        self.res2 = ResBlock(c, c)
        self.res3 = ResBlock(c, c)
        self.res4 = ResBlock(c, c)
        self.res5 = ResBlock(c, c)
        self.conv1 = nn.Conv2d(c, 8, 3, 1, 1)
        self.up = nn.PixelShuffle(2)
    def forward(self, x):
        # Work at a reduced resolution when scale != 1.
        if self.scale != 1:
            x = F.interpolate(x, scale_factor=1. / self.scale,
                              mode="bilinear", align_corners=False)
        x = self.conv0(x)
        for res in (self.res0, self.res1, self.res2,
                    self.res3, self.res4, self.res5):
            x = res(x)
        # conv0 halved the resolution; PixelShuffle(2) restores it while
        # turning 8 channels into 2 flow channels.
        flow = self.up(self.conv1(x))
        if self.scale != 1:
            flow = F.interpolate(flow, scale_factor=self.scale,
                                 mode="bilinear", align_corners=False)
        return flow
class IFNet(nn.Module):
    # HD variant of the coarse-to-fine flow estimator: four IFBlocks at
    # scales 8/4/2/1, each refining the accumulated flow from warped inputs.
    def __init__(self):
        super(IFNet, self).__init__()
        self.block0 = IFBlock(6, scale=8, c=192)
        self.block1 = IFBlock(8, scale=4, c=128)
        self.block2 = IFBlock(8, scale=2, c=96)
        self.block3 = IFBlock(8, scale=1, c=48)
    def forward(self, x):
        # x: the two input images concatenated channel-wise (6 channels);
        # flow is estimated at half the input resolution.
        x = F.interpolate(x, scale_factor=0.5, mode="bilinear",
                          align_corners=False)
        flow0 = self.block0(x)
        F1 = flow0
        # Warp both images with opposite flows, then predict a residual
        # flow correction from the warped pair at each stage.
        warped_img0 = warp(x[:, :3], F1)
        warped_img1 = warp(x[:, 3:], -F1)
        flow1 = self.block1(torch.cat((warped_img0, warped_img1, F1), 1))
        F2 = (flow0 + flow1)
        warped_img0 = warp(x[:, :3], F2)
        warped_img1 = warp(x[:, 3:], -F2)
        flow2 = self.block2(torch.cat((warped_img0, warped_img1, F2), 1))
        F3 = (flow0 + flow1 + flow2)
        warped_img0 = warp(x[:, :3], F3)
        warped_img1 = warp(x[:, 3:], -F3)
        flow3 = self.block3(torch.cat((warped_img0, warped_img1, F3), 1))
        F4 = (flow0 + flow1 + flow2 + flow3)
        # Final flow plus every refinement stage (stages feed the losses).
        return F4, [F1, F2, F3, F4]
if __name__ == '__main__':
    # Smoke test: run the flow network on a zero image vs. a random image
    # and print the resulting flow shape.
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    imgs = torch.cat((img0, img1), 1)
    flownet = IFNet()
    flow, _ = flownet(imgs)
    print(flow.shape)

View File

@ -74,15 +74,15 @@ class ContextNet(nn.Module):
f1 = warp(x, flow)
x = self.conv2(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False) * 0.5
align_corners=False) * 0.5
f2 = warp(x, flow)
x = self.conv3(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False) * 0.5
align_corners=False) * 0.5
f3 = warp(x, flow)
x = self.conv4(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False) * 0.5
align_corners=False) * 0.5
f4 = warp(x, flow)
return [f1, f2, f3, f4]
@ -158,14 +158,17 @@ class Model:
self.contextnet.to(device)
self.fusionnet.to(device)
def load_model(self, path, rank=0):
def load_model(self, path, rank):
def convert(param):
return {
k.replace("module.", ""): v
for k, v in param.items()
if "module." in k
}
if rank == 0:
if rank == -1:
return {
k.replace("module.", ""): v
for k, v in param.items()
if "module." in k
}
else:
return param
if rank <= 0:
self.flownet.load_state_dict(
convert(torch.load('{}/flownet.pkl'.format(path), map_location=device)))
self.contextnet.load_state_dict(
@ -173,12 +176,10 @@ class Model:
self.fusionnet.load_state_dict(
convert(torch.load('{}/unet.pkl'.format(path), map_location=device)))
def save_model(self, path, rank=0):
def save_model(self, path, rank):
if rank == 0:
torch.save(self.flownet.state_dict(),
'{}/flownet.pkl'.format(path))
torch.save(self.contextnet.state_dict(),
'{}/contextnet.pkl'.format(path))
torch.save(self.flownet.state_dict(), '{}/flownet.pkl'.format(path))
torch.save(self.contextnet.state_dict(), '{}/contextnet.pkl'.format(path))
torch.save(self.fusionnet.state_dict(), '{}/unet.pkl'.format(path))
def predict(self, imgs, flow, training=True, flow_gt=None):
@ -187,7 +188,7 @@ class Model:
c0 = self.contextnet(img0, flow)
c1 = self.contextnet(img1, -flow)
flow = F.interpolate(flow, scale_factor=2.0, mode="bilinear",
align_corners=False, recompute_scale_factor=False) * 2.0
align_corners=False) * 2.0
refine_output, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.fusionnet(
img0, img1, flow, c0, c1, flow_gt)
res = torch.sigmoid(refine_output[:, :3]) * 2 - 1
@ -201,9 +202,8 @@ class Model:
return pred
def inference(self, img0, img1):
with torch.no_grad():
imgs = torch.cat((img0, img1), 1)
flow, _ = self.flownet(imgs)
imgs = torch.cat((img0, img1), 1)
flow, _ = self.flownet(imgs)
return self.predict(imgs, flow, training=False).detach()
def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
@ -223,9 +223,9 @@ class Model:
loss_mask = torch.abs(
merged_img - gt).sum(1, True).float().detach()
loss_mask = F.interpolate(loss_mask, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False).detach()
align_corners=False).detach()
flow_gt = (F.interpolate(flow_gt, scale_factor=0.5, mode="bilinear",
align_corners=False, recompute_scale_factor=False) * 0.5).detach()
align_corners=False) * 0.5).detach()
loss_cons = 0
for i in range(3):
loss_cons += self.epe(flow_list[i], flow_gt[:, :2], 1)

View File

@ -0,0 +1,250 @@
import torch
import torch.nn as nn
import numpy as np
from torch.optim import AdamW
import torch.optim as optim
import itertools
from model.warplayer import warp
from torch.nn.parallel import DistributedDataParallel as DDP
from model.IFNet2F import *
import torch.nn.functional as F
from model.loss import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (with bias) followed by PReLU."""
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=padding,
                            dilation=dilation, bias=True)
    return nn.Sequential(convolution, nn.PReLU(out_planes))
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
    """ConvTranspose2d (with bias) followed by PReLU — 2x upsampling block.

    Fix: kernel_size/stride/padding were accepted but silently ignored
    (the transposed conv was hard-coded to 4/2/1). They are now passed
    through; the defaults match the old hard-coded values, so existing
    call sites behave identically.
    """
    return nn.Sequential(
        torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, bias=True),
        nn.PReLU(out_planes)
    )
def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Single Conv2d (with bias) wrapped in Sequential, no activation."""
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=padding,
                            dilation=dilation, bias=True)
    return nn.Sequential(convolution)
class ResBlock(nn.Module):
    """Residual block with a squeeze-and-excitation style channel gate.

    Note the default stride is 2, so the block downsamples unless
    stride=1 is passed explicitly.
    """
    def __init__(self, in_planes, out_planes, stride=2):
        super(ResBlock, self).__init__()
        # Identity shortcut when the shape is preserved; otherwise a 3x3
        # strided projection.
        if in_planes == out_planes and stride == 1:
            self.conv0 = nn.Identity()
        else:
            self.conv0 = nn.Conv2d(in_planes, out_planes,
                                   3, stride, 1, bias=False)
        self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
        self.conv2 = conv_woact(out_planes, out_planes, 3, 1, 1)
        self.relu1 = nn.PReLU(1)
        self.relu2 = nn.PReLU(out_planes)
        self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
        self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        shortcut = self.conv0(x)
        feat = self.conv2(self.conv1(x))
        # Channel attention computed from the global spatial mean.
        gate = feat.mean(3, True).mean(2, True)
        gate = torch.sigmoid(self.fc2(self.relu1(self.fc1(gate))))
        return self.relu2(feat * gate + shortcut)
c = 16
class ContextNet(nn.Module):
    # Extracts a 4-level feature pyramid from one image, warping each level
    # by the (progressively downscaled) flow so the features align with the
    # interpolation target.
    def __init__(self):
        super(ContextNet, self).__init__()
        self.conv1 = ResBlock(3, c, 1)
        self.conv2 = ResBlock(c, 2*c)
        self.conv3 = ResBlock(2*c, 4*c)
        self.conv4 = ResBlock(4*c, 8*c)
    def forward(self, x, flow):
        x = self.conv1(x)
        f1 = warp(x, flow)
        x = self.conv2(x)
        # Each subsequent ResBlock uses its default stride of 2, so the flow
        # is downscaled and its magnitude halved to stay consistent.
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f2 = warp(x, flow)
        x = self.conv3(x)
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f3 = warp(x, flow)
        x = self.conv4(x)
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f4 = warp(x, flow)
        return [f1, f2, f3, f4]
class FusionNet(nn.Module):
    """U-Net style fusion network: merges the two warped images with the
    context pyramids (c0, c1) and predicts a 4-channel refinement map
    (3 residual channels + 1 blend-mask channel)."""
    def __init__(self):
        super(FusionNet, self).__init__()
        self.down0 = ResBlock(8, 2*c, 1)
        self.down1 = ResBlock(4*c, 4*c)
        self.down2 = ResBlock(8*c, 8*c)
        self.down3 = ResBlock(16*c, 16*c)
        self.up0 = deconv(32*c, 8*c)
        self.up1 = deconv(16*c, 4*c)
        self.up2 = deconv(8*c, 2*c)
        self.up3 = deconv(4*c, c)
        self.conv = nn.Conv2d(c, 4, 3, 2, 1)
    def forward(self, img0, img1, flow, c0, c1, flow_gt):
        warped_img0 = warp(img0, flow)
        warped_img1 = warp(img1, -flow)
        # Fix: identity check instead of `== None` — comparing a tensor with
        # == performs elementwise equality, not a null test.
        if flow_gt is None:
            warped_img0_gt, warped_img1_gt = None, None
        else:
            # Ground-truth flow: forward flow in [:2], backward in [2:4].
            warped_img0_gt = warp(img0, flow_gt[:, :2])
            warped_img1_gt = warp(img1, flow_gt[:, 2:4])
        # Encoder: concatenate each stage with the matching pyramid levels.
        s0 = self.down0(torch.cat((warped_img0, warped_img1, flow), 1))
        s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
        s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
        s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
        # Decoder with skip connections.
        x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
        x = self.up1(torch.cat((x, s2), 1))
        x = self.up2(torch.cat((x, s1), 1))
        x = self.up3(torch.cat((x, s0), 1))
        x = self.conv(x)
        return x, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
class Model:
    # Bundles the three RIFE sub-networks (flow, context, fusion) with
    # their optimizer, LR schedule, and losses. local_rank != -1 wraps the
    # networks in DistributedDataParallel for multi-GPU training.
    def __init__(self, local_rank=-1):
        self.flownet = IFNet()
        self.contextnet = ContextNet()
        self.fusionnet = FusionNet()
        self.device()
        self.optimG = AdamW(itertools.chain(
            self.flownet.parameters(),
            self.contextnet.parameters(),
            self.fusionnet.parameters()), lr=1e-6, weight_decay=1e-5)
        self.schedulerG = optim.lr_scheduler.CyclicLR(
            self.optimG, base_lr=1e-6, max_lr=1e-3, step_size_up=8000, cycle_momentum=False)
        self.epe = EPE()
        self.ter = Ternary()
        self.sobel = SOBEL()
        if local_rank != -1:
            self.flownet = DDP(self.flownet, device_ids=[
                               local_rank], output_device=local_rank)
            self.contextnet = DDP(self.contextnet, device_ids=[
                                  local_rank], output_device=local_rank)
            self.fusionnet = DDP(self.fusionnet, device_ids=[
                                 local_rank], output_device=local_rank)
    def train(self):
        # Put all sub-networks in training mode.
        self.flownet.train()
        self.contextnet.train()
        self.fusionnet.train()
    def eval(self):
        # Put all sub-networks in evaluation mode.
        self.flownet.eval()
        self.contextnet.eval()
        self.fusionnet.eval()
    def device(self):
        # Move all sub-networks to the module-level device.
        self.flownet.to(device)
        self.contextnet.to(device)
        self.fusionnet.to(device)
    def load_model(self, path, rank=0):
        # Load the three checkpoints from `path`, stripping the "module."
        # prefix that DDP prepends to parameter names.
        # NOTE(review): convert() drops keys WITHOUT the "module." prefix,
        # so only DDP-saved checkpoints load — confirm this is intended
        # (the sibling HD variant returns param unchanged when rank != -1).
        def convert(param):
            return {
                k.replace("module.", ""): v
                for k, v in param.items()
                if "module." in k
            }
        if rank == 0:
            self.flownet.load_state_dict(
                convert(torch.load('{}/flownet.pkl'.format(path), map_location=device)))
            self.contextnet.load_state_dict(
                convert(torch.load('{}/contextnet.pkl'.format(path), map_location=device)))
            self.fusionnet.load_state_dict(
                convert(torch.load('{}/unet.pkl'.format(path), map_location=device)))
    def save_model(self, path, rank=0):
        # Only rank 0 writes checkpoints (avoids clobbering under DDP).
        if rank == 0:
            torch.save(self.flownet.state_dict(),
                       '{}/flownet.pkl'.format(path))
            torch.save(self.contextnet.state_dict(),
                       '{}/contextnet.pkl'.format(path))
            torch.save(self.fusionnet.state_dict(), '{}/unet.pkl'.format(path))
    def predict(self, imgs, flow, training=True, flow_gt=None):
        # Fuse the two images (channel-concatenated in `imgs`) given the
        # estimated flow.
        img0 = imgs[:, :3]
        img1 = imgs[:, 3:]
        # The flow was estimated at half resolution; upscale it and double
        # its magnitude before extracting context features.
        flow = F.interpolate(flow, scale_factor=2.0, mode="bilinear",
                             align_corners=False) * 2.0
        c0 = self.contextnet(img0, flow)
        c1 = self.contextnet(img1, -flow)
        refine_output, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.fusionnet(
            img0, img1, flow, c0, c1, flow_gt)
        # First 3 channels: residual in [-1, 1]; 4th channel: blend mask.
        res = torch.sigmoid(refine_output[:, :3]) * 2 - 1
        mask = torch.sigmoid(refine_output[:, 3:4])
        merged_img = warped_img0 * mask + warped_img1 * (1 - mask)
        pred = merged_img + res
        pred = torch.clamp(pred, 0, 1)
        if training:
            return pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
        else:
            return pred
    def inference(self, img0, img1):
        # Interpolate the midpoint frame between img0 and img1 (no grad).
        with torch.no_grad():
            imgs = torch.cat((img0, img1), 1)
            flow, _ = self.flownet(imgs)
            return self.predict(imgs, flow, training=False).detach()
    def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
        # One optimization (or evaluation) step; returns the prediction and
        # the individual loss terms.
        # NOTE(review): `mul` is accepted but never used here — confirm.
        for param_group in self.optimG.param_groups:
            param_group['lr'] = learning_rate
        if training:
            self.train()
        else:
            self.eval()
        flow, flow_list = self.flownet(imgs)
        pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.predict(
            imgs, flow, flow_gt=flow_gt)
        loss_ter = self.ter(pred, gt).mean()
        if training:
            with torch.no_grad():
                # Diagnostics only; no gradients needed.
                loss_flow = torch.abs(warped_img0_gt - gt).mean()
                loss_mask = torch.abs(
                    merged_img - gt).sum(1, True).float().detach()
            # Flow-consistency loss against ground truth for each of the
            # three refinement stages (both flow directions).
            loss_cons = 0
            for i in range(3):
                loss_cons += self.epe(flow_list[i], flow_gt[:, :2], 1)
                loss_cons += self.epe(-flow_list[i], flow_gt[:, 2:4], 1)
            loss_cons = loss_cons.mean() * 0.01
        else:
            loss_cons = torch.tensor([0])
            loss_flow = torch.abs(warped_img0 - gt).mean()
            loss_mask = 1
        # Charbonnier-style L1 on the prediction.
        loss_l1 = (((pred - gt) ** 2 + 1e-6) ** 0.5).mean()
        if training:
            self.optimG.zero_grad()
            loss_G = loss_l1 + loss_cons + loss_ter
            loss_G.backward()
            self.optimG.step()
        return pred, merged_img, flow, loss_l1, loss_flow, loss_cons, loss_ter, loss_mask
if __name__ == '__main__':
    # Smoke test: interpolate between a zero image and a random image and
    # print the predicted frame's shape.
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    imgs = torch.cat((img0, img1), 1)
    model = Model()
    model.eval()
    # Fix: Model.inference takes the two images separately; passing the
    # concatenated tensor raised a TypeError (missing argument).
    print(model.inference(img0, img1).shape)

View File

@ -0,0 +1,260 @@
import torch
import torch.nn as nn
import numpy as np
from torch.optim import AdamW
import torch.optim as optim
import itertools
from model.warplayer import warp
from torch.nn.parallel import DistributedDataParallel as DDP
from model.IFNet_HD import *
import torch.nn.functional as F
from model.loss import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (with bias) -> PReLU building block."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, dilation=dilation,
                  bias=True),
        nn.PReLU(out_planes),
    ]
    return nn.Sequential(*layers)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
    """ConvTranspose2d (with bias) followed by PReLU — 2x upsampling block.

    Fix: kernel_size/stride/padding were accepted but silently ignored
    (the transposed conv was hard-coded to 4/2/1). They are now passed
    through; the defaults match the old hard-coded values, so existing
    call sites behave identically.
    """
    return nn.Sequential(
        torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes,
                                 kernel_size=kernel_size, stride=stride,
                                 padding=padding, bias=True),
        nn.PReLU(out_planes)
    )
def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Single Conv2d (with bias), no activation, wrapped for consistency."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, dilation=dilation,
                  bias=True),
    ]
    return nn.Sequential(*layers)
class ResBlock(nn.Module):
    """Residual block with a squeeze-and-excitation style channel gate
    (HD variant). The default stride is 2, so the block downsamples
    unless stride=1 is passed explicitly."""
    def __init__(self, in_planes, out_planes, stride=2):
        super(ResBlock, self).__init__()
        # Identity shortcut when the shape is preserved; otherwise a 3x3
        # strided projection.
        if in_planes == out_planes and stride == 1:
            self.conv0 = nn.Identity()
        else:
            self.conv0 = nn.Conv2d(in_planes, out_planes,
                                   3, stride, 1, bias=False)
        self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
        self.conv2 = conv_woact(out_planes, out_planes, 3, 1, 1)
        self.relu1 = nn.PReLU(1)
        self.relu2 = nn.PReLU(out_planes)
        self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
        self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        shortcut = self.conv0(x)
        feat = self.conv2(self.conv1(x))
        # Channel attention computed from the global spatial mean.
        gate = feat.mean(3, True).mean(2, True)
        gate = torch.sigmoid(self.fc2(self.relu1(self.fc1(gate))))
        return self.relu2(feat * gate + shortcut)
c = 32
class ContextNet(nn.Module):
    # HD variant of the context extractor: a stride-2 stem (conv0) means
    # features start at half resolution, so the flow is downscaled before
    # the first warp as well.
    def __init__(self):
        super(ContextNet, self).__init__()
        self.conv0 = conv(3, c, 3, 2, 1)
        self.conv1 = ResBlock(c, c)
        self.conv2 = ResBlock(c, 2*c)
        self.conv3 = ResBlock(2*c, 4*c)
        self.conv4 = ResBlock(4*c, 8*c)
    def forward(self, x, flow):
        x = self.conv0(x)
        x = self.conv1(x)
        # NOTE(review): conv0 has stride 2 and conv1 (ResBlock default
        # stride 2) downsamples again, while the flow is only halved once
        # before f1 — verify the feature/flow scales actually match here.
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
        f1 = warp(x, flow)
        x = self.conv2(x)
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f2 = warp(x, flow)
        x = self.conv3(x)
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f3 = warp(x, flow)
        x = self.conv4(x)
        flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
                             align_corners=False) * 0.5
        f4 = warp(x, flow)
        return [f1, f2, f3, f4]
class FusionNet(nn.Module):
    """U-Net style fusion network (HD variant): merges the two warped
    images with the context pyramids (c0, c1) and predicts a 4-channel
    refinement map via a PixelShuffle head."""
    def __init__(self):
        super(FusionNet, self).__init__()
        self.conv0 = conv(8, c, 3, 2, 1)
        self.down0 = ResBlock(c, 2*c)
        self.down1 = ResBlock(4*c, 4*c)
        self.down2 = ResBlock(8*c, 8*c)
        self.down3 = ResBlock(16*c, 16*c)
        self.up0 = deconv(32*c, 8*c)
        self.up1 = deconv(16*c, 4*c)
        self.up2 = deconv(8*c, 2*c)
        self.up3 = deconv(4*c, c)
        self.conv = nn.Conv2d(c, 16, 3, 1, 1)
        self.up4 = nn.PixelShuffle(2)
    def forward(self, img0, img1, flow, c0, c1, flow_gt):
        warped_img0 = warp(img0, flow)
        warped_img1 = warp(img1, -flow)
        # Fix: identity check instead of `== None` — comparing a tensor with
        # == performs elementwise equality, not a null test.
        if flow_gt is None:
            warped_img0_gt, warped_img1_gt = None, None
        else:
            # Ground-truth flow: forward flow in [:2], backward in [2:4].
            warped_img0_gt = warp(img0, flow_gt[:, :2])
            warped_img1_gt = warp(img1, flow_gt[:, 2:4])
        # Encoder: strided stem, then concatenate each stage with the
        # matching pyramid levels.
        x = self.conv0(torch.cat((warped_img0, warped_img1, flow), 1))
        s0 = self.down0(x)
        s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
        s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
        s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
        # Decoder with skip connections.
        x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
        x = self.up1(torch.cat((x, s2), 1))
        x = self.up2(torch.cat((x, s1), 1))
        x = self.up3(torch.cat((x, s0), 1))
        # Final 16 channels -> PixelShuffle(2) -> 4 channels at 2x resolution.
        x = self.up4(self.conv(x))
        return x, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
class Model:
    # Top-level RIFE model wrapper: bundles the flow estimator (IFNet),
    # the context extractor (ContextNet) and the fusion/refinement net
    # (FusionNet), plus the optimizer/scheduler used for training.
    def __init__(self, local_rank=-1):
        self.flownet = IFNet()
        self.contextnet = ContextNet()
        self.fusionnet = FusionNet()
        # Move all three subnetworks to the module-level `device`.
        self.device()
        # One AdamW over the parameters of all three networks; the base lr
        # here is overwritten by the schedule / `update(learning_rate=...)`.
        self.optimG = AdamW(itertools.chain(
            self.flownet.parameters(),
            self.contextnet.parameters(),
            self.fusionnet.parameters()), lr=1e-6, weight_decay=1e-5)
        self.schedulerG = optim.lr_scheduler.CyclicLR(
            self.optimG, base_lr=1e-6, max_lr=1e-3, step_size_up=8000, cycle_momentum=False)
        # Training losses: endpoint error, ternary census loss, Sobel edge loss.
        self.epe = EPE()
        self.ter = Ternary()
        self.sobel = SOBEL()
        # local_rank != -1 means distributed training: wrap each net in DDP.
        if local_rank != -1:
            self.flownet = DDP(self.flownet, device_ids=[
                local_rank], output_device=local_rank)
            self.contextnet = DDP(self.contextnet, device_ids=[
                local_rank], output_device=local_rank)
            self.fusionnet = DDP(self.fusionnet, device_ids=[
                local_rank], output_device=local_rank)

    def train(self):
        # Put all subnetworks into training mode (enables dropout/BN updates).
        self.flownet.train()
        self.contextnet.train()
        self.fusionnet.train()

    def eval(self):
        # Put all subnetworks into evaluation mode.
        self.flownet.eval()
        self.contextnet.eval()
        self.fusionnet.eval()

    def device(self):
        # Move all subnetworks to the module-level `device` (CPU or CUDA).
        self.flownet.to(device)
        self.contextnet.to(device)
        self.fusionnet.to(device)

    def load_model(self, path, rank):
        # Load the three checkpoints from `path`. When rank == -1
        # (non-distributed), strip the DDP "module." prefix from keys;
        # NOTE(review): keys WITHOUT the prefix are dropped in that branch —
        # presumably all saved keys carry the prefix; verify against the
        # checkpoints actually shipped.
        def convert(param):
            if rank == -1:
                return {
                    k.replace("module.", ""): v
                    for k, v in param.items()
                    if "module." in k
                }
            else:
                return param
        # Only rank <= 0 loads; other DDP ranks receive weights via broadcast.
        if rank <= 0:
            self.flownet.load_state_dict(
                convert(torch.load('{}/flownet.pkl'.format(path), map_location=device)))
            self.contextnet.load_state_dict(
                convert(torch.load('{}/contextnet.pkl'.format(path), map_location=device)))
            self.fusionnet.load_state_dict(
                convert(torch.load('{}/unet.pkl'.format(path), map_location=device)))

    def save_model(self, path, rank):
        # Only rank 0 writes checkpoints (avoids concurrent writes under DDP).
        if rank == 0:
            torch.save(self.flownet.state_dict(), '{}/flownet.pkl'.format(path))
            torch.save(self.contextnet.state_dict(), '{}/contextnet.pkl'.format(path))
            torch.save(self.fusionnet.state_dict(), '{}/unet.pkl'.format(path))

    def predict(self, imgs, flow, training=True, flow_gt=None):
        # imgs is the two frames concatenated on the channel axis:
        # [:, :3] = frame 0, [:, 3:] = frame 1.
        img0 = imgs[:, :3]
        img1 = imgs[:, 3:]
        c0 = self.contextnet(img0, flow)
        c1 = self.contextnet(img1, -flow)
        # Flow was estimated at half resolution: upsample 2x and scale
        # the vectors accordingly.
        flow = F.interpolate(flow, scale_factor=2.0, mode="bilinear",
                             align_corners=False) * 2.0
        refine_output, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.fusionnet(
            img0, img1, flow, c0, c1, flow_gt)
        # First 3 channels: residual image mapped to [-1, 1];
        # channel 3: blending mask in [0, 1].
        res = torch.sigmoid(refine_output[:, :3]) * 2 - 1
        mask = torch.sigmoid(refine_output[:, 3:4])
        merged_img = warped_img0 * mask + warped_img1 * (1 - mask)
        pred = merged_img + res
        pred = torch.clamp(pred, 0, 1)
        if training:
            return pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
        else:
            return pred

    def inference(self, img0, img1):
        # Interpolate the midpoint frame between img0 and img1.
        # Returns a detached tensor clamped to [0, 1].
        imgs = torch.cat((img0, img1), 1)
        flow, _ = self.flownet(imgs)
        return self.predict(imgs, flow, training=False).detach()

    def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
        # One training (or evaluation) step. `mul` is accepted but unused
        # in the visible body — NOTE(review): confirm whether callers rely on it.
        # The caller supplies the learning rate explicitly each step.
        for param_group in self.optimG.param_groups:
            param_group['lr'] = learning_rate
        if training:
            self.train()
        else:
            self.eval()
        flow, flow_list = self.flownet(imgs)
        pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.predict(
            imgs, flow, flow_gt=flow_gt)
        loss_ter = self.ter(pred, gt).mean()
        if training:
            # Diagnostic-only quantities; kept out of the autograd graph.
            with torch.no_grad():
                loss_flow = torch.abs(warped_img0_gt - gt).mean()
                loss_mask = torch.abs(
                    merged_img - gt).sum(1, True).float().detach()
                loss_mask = F.interpolate(loss_mask, scale_factor=0.5, mode="bilinear",
                                          align_corners=False).detach()
                # Downscale ground-truth flow to match the intermediate
                # flow predictions (half resolution, half magnitude).
                flow_gt = (F.interpolate(flow_gt, scale_factor=0.5, mode="bilinear",
                                         align_corners=False) * 0.5).detach()
            # Flow consistency loss against ground truth, accumulated
            # over the 3 intermediate flow predictions.
            loss_cons = 0
            for i in range(3):
                loss_cons += self.epe(flow_list[i], flow_gt[:, :2], 1)
                loss_cons += self.epe(-flow_list[i], flow_gt[:, 2:4], 1)
            loss_cons = loss_cons.mean() * 0.01
        else:
            # Evaluation path: no ground-truth flow available.
            loss_cons = torch.tensor([0])
            loss_flow = torch.abs(warped_img0 - gt).mean()
            loss_mask = 1
        # Charbonnier-style robust L1 on the final prediction.
        loss_l1 = (((pred - gt) ** 2 + 1e-6) ** 0.5).mean()
        if training:
            self.optimG.zero_grad()
            # Only l1 + consistency + ternary contribute gradients;
            # loss_flow/loss_mask are reported for logging only.
            loss_G = loss_l1 + loss_cons + loss_ter
            loss_G.backward()
            self.optimG.step()
        return pred, merged_img, flow, loss_l1, loss_flow, loss_cons, loss_ter, loss_mask
if __name__ == '__main__':
    # Smoke test: interpolate between a zero frame and a random frame
    # and print the shape of the predicted midpoint frame.
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    model = Model()
    model.eval()
    # Bug fix: Model.inference(img0, img1) takes the two frames
    # separately and concatenates them itself; the previous call
    # passed a single pre-concatenated tensor, raising a TypeError
    # for the missing second argument.
    print(model.inference(img0, img1).shape)

Binary file not shown.

Binary file not shown.

Binary file not shown.