C# FFmpeg合并多个音频文件,并指定每段音频的插入时间。
合成音频的FFmpeg命令为:
ffmpeg -i 11.wav -i 13.wav -filter_complex "[0]adelay=1s:all=1[0a];[1]adelay=26s:all=1[1a];[0a][1a]amix=inputs=2[a]" -map "[a]" output.wav
下面对该命令中各个参数的含义做一个说明:
-i <fileName>
输入文件的路径,有几个文件就几个-i命令
[0]adelay=1s:all=1[0a]
[0]指的是第几个输入文件,索引从0开始;[0a]是为这一步滤镜的输出流起的标签名,供后面的滤镜引用。adelay=1s
表示将该段音频延迟1秒后混入(即从输出的第1秒处开始播放)。延迟值默认单位是毫秒,如 adelay=1000;加后缀 s 则表示秒,此时不支持小数写法(FFmpeg 4.2 以下版本不支持 1s 这种写法)。all=1 表示将该延迟应用到所有声道(all: Use last set delay for all remaining channels. By default is disabled.)。
amix=inputs=2
2指的是一共有几个文件
output.wav
输出文件的路径或者文件名
文件名都支持全路径的写法。可以是 C:\Data\CHN\Other\FFmpegMergeMp3\11.wav
也可以是 11.wav
ffmpeg版本:https://www.gyan.dev/ffmpeg/builds/ffmpeg-git-essentials.7z
latest git master branch build version: 2021-11-25-git-522f577d7e
使用 <package id="Xabe.FFmpeg" version="5.0.2" targetFramework="net48" />
包来执行命令
// Directory containing the ffmpeg/ffprobe executables; passed to FFmpeg.SetExecutablesPath below.
public static string FFMPEGPath { get; set; } = @"C:\Data\PATHVariables";
// Demo entry point: builds the list of audio sections with their start
// offsets inside the output, then merges them into one file.
static async Task Main(string[] args)
{
    string outputPath = $@"C:\Data\CHN\Other\FFmpegMergeMp3\{Guid.NewGuid()}.wav";

    var sections = new List<TextToSppechConversionModel>
    {
        new TextToSppechConversionModel { Title = "Section 1", AudioFilePath = @"C:\Data\CHN\Other\FFmpegMergeMp3\11.wav", StartTime = "00:0:05" },
        new TextToSppechConversionModel { Title = "Section 3", AudioFilePath = @"C:\Data\CHN\Other\FFmpegMergeMp3\13.wav", StartTime = "00:0:15" },
    };

    await new Program().CombineAudioToOneFile(sections, outputPath);
}
/// <summary>
/// Merges several audio files into one output file, delaying each input so it
/// starts at its configured <c>StartTime</c>. Builds and runs an FFmpeg command
/// of the form:
/// <c>ffmpeg -i a.wav -i b.wav -filter_complex "[0]adelay=...:all=1[0a];[1]adelay=...:all=1[1a];[0a][1a]amix=inputs=2[a]" -map "[a]" out.wav</c>
/// </summary>
/// <param name="listSourceFile">Sections to merge, ordered by start time; each StartTime is "HH:mm:ss".</param>
/// <param name="outputPath">Full path of the merged output file.</param>
/// <exception cref="ArgumentException">The source list is null or empty.</exception>
/// <exception cref="FileNotFoundException">An input audio file does not exist.</exception>
/// <exception cref="InvalidOperationException">A section would still be playing when the next one starts, or a file has no audio stream.</exception>
public async Task CombineAudioToOneFile(List<TextToSppechConversionModel> listSourceFile, string outputPath)
{
    if (listSourceFile == null || listSourceFile.Count == 0)
        throw new ArgumentException("At least one source file is required.", nameof(listSourceFile));

    FFmpeg.SetExecutablesPath(FFMPEGPath, "ffmpeg", "ffprobe");

    //https://stackoverflow.com/questions/60027460/how-to-add-multiple-audio-files-at-specific-times-on-a-silence-audio-file-using
    //https://www.ffmpeg.org/ffmpeg-all.html#adelay
    //https://ffmpeg.org/ffmpeg-filters.html#amix
    var sbInputFile = new StringBuilder();   // " -i file1 -i file2 ..."
    var sbAdelay = new StringBuilder();      // "[0]adelay=...:all=1[0a];[1]adelay=...:all=1[1a];..."
    var sbLabels = new StringBuilder();      // "[0a][1a]..."
    var listStream = new List<IStream>();

    for (int i = 0; i < listSourceFile.Count; i++)
    {
        var sourceModel = listSourceFile[i];
        if (!File.Exists(sourceModel.AudioFilePath))
            throw new FileNotFoundException($"the file {sourceModel.AudioFilePath} not found.", sourceModel.AudioFilePath);

        var mediaInfo = await FFmpeg.GetMediaInfo(sourceModel.AudioFilePath);
        var audioStream = mediaInfo.AudioStreams.FirstOrDefault();
        if (audioStream == null)
            throw new InvalidOperationException($"the file {sourceModel.AudioFilePath} contains no audio stream.");
        listStream.Add(audioStream.SetCodec(AudioCodec.ac3));

        // "HH:mm:ss" -> offset from the start of the merged output.
        var startOffset = TimeSpan.Parse(sourceModel.StartTime);

        // Verify this section finishes before the next one is scheduled to start.
        if (i + 1 < listSourceFile.Count)
        {
            var next = listSourceFile[i + 1];
            var nextStart = TimeSpan.Parse(next.StartTime);
            var currEnd = startOffset + mediaInfo.Duration;
            if (currEnd > nextStart)
                throw new InvalidOperationException(
                    $"the section [{sourceModel.Title}] media end time({currEnd:hh\\:mm\\:ss}) is greater than next section[{next.Title}] start time({next.StartTime})");
        }

        // Quote paths so the command survives spaces in directory/file names.
        sbInputFile.Append($" -i \"{sourceModel.AudioFilePath}\"");
        // adelay interprets bare numbers as milliseconds; the "Ns" suffix form
        // rejects fractional values (and needs ffmpeg >= 4.2), so always emit
        // whole milliseconds instead of TotalSeconds + "s".
        sbAdelay.Append($"[{i}]adelay={(long)startOffset.TotalMilliseconds}:all=1[{i}a];");
        sbLabels.Append($"[{i}a]");
    }

    // ffmpeg -i 11.wav -i 13.wav -filter_complex "[0]adelay=1000:all=1[0a];[1]adelay=26000:all=1[1a];[0a][1a]amix=inputs=2[a]" -map "[a]" output.wav
    string param = $"{sbInputFile} -filter_complex \"{sbAdelay}{sbLabels}amix=inputs={listSourceFile.Count}[a]\" -map \"[a]\" \"{outputPath}\"";
    var conversion = FFmpeg.Conversions.New().AddStream(listStream).SetOutput(outputPath);
    await conversion.Start(param);
}
/// <summary>
/// One audio section to merge: a source file plus the time at which it should
/// start inside the combined output.
/// </summary>
internal class TextToSppechConversionModel
{
/// <summary>Display name used in diagnostic/error messages (e.g. "Section 1").</summary>
public string Title { get; set; }
/// <summary>Full or relative path of the source audio file.</summary>
public string AudioFilePath { get; set; }
/// <summary>Start offset within the output, formatted "HH:mm:ss".</summary>
public string StartTime { get; set; }
}
参考:
//https://stackoverflow.com/questions/60027460/how-to-add-multiple-audio-files-at-specific-times-on-a-silence-audio-file-using
//https://www.ffmpeg.org/ffmpeg-all.html#adelay
//https://ffmpeg.org/ffmpeg-filters.html#amix
//https://ffmpeg.xabe.net/docs.html
//https://ffmpeg.xabe.net/tutorial.html