function draw_coalA1_audio_canvas() {
    var coalUrl = "/tcc/front/coalAudio";
    var gangueUrl = "/tcc/front/gangueAudio";
    var canvas = document.getElementById("coalA1_audio_canvas");
    var canvasCtx = canvas.getContext("2d");
    var audioContext = new AudioContext();
    var filter = audioContext.createBiquadFilter();

    // Pick the endpoint that matches the current pickup/audio-type flags and draw once.
    let url2 = "";
    if (pickup_isBreak == 0 && audio_type == 0) {
        url2 = coalUrl;
    } else if (pickup_isBreak == 0 && audio_type == 1) {
        url2 = gangueUrl;
    }
    drawAudio(url2, canvas, canvasCtx, audioContext, filter);

    // Re-select the endpoint and redraw every second.
    setInterval(function () {
        let url = "";
        if (pickup_isBreak == 0 && audio_type == 0) {
            url = coalUrl;
        } else if (pickup_isBreak == 0 && audio_type == 1) {
            url = gangueUrl;
        }
        drawAudio(url, canvas, canvasCtx, audioContext, filter);
    }, 1000);
}

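// The coal/gangue endpoint selection above is written out twice (once for the
// initial draw, once inside the setInterval callback). A minimal sketch of
// factoring it out, assuming the same global pickup_isBreak / audio_type flags;
// selectAudioUrl is a hypothetical helper, not part of the original page:
function selectAudioUrl(coalUrl, gangueUrl) {
    if (pickup_isBreak == 0 && audio_type == 0) return coalUrl;   // coal audio selected
    if (pickup_isBreak == 0 && audio_type == 1) return gangueUrl; // gangue audio selected
    return ""; // nothing selected
}
// Usage (sketch): drawAudio(selectAudioUrl(coalUrl, gangueUrl), canvas, canvasCtx, audioContext, filter);
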
function drawAudio(url, canvas, canvasCtx, audioContext, filter) {
    var request = new XMLHttpRequest();   // open a request
    request.open('POST', url, true);      // request data from the url
    request.responseType = 'arraybuffer'; // set the response type
    request.onload = function () {
        var audioData = request.response;
        if (audioData.byteLength < (1000 * 100)) {
            return; // skip responses smaller than ~100 KB (no usable audio yet)
        }
        audioContext.decodeAudioData(audioData, function (buffer) {
            var audioBufferSourceNode = audioContext.createBufferSource();
            var analyser = audioContext.createAnalyser();
            analyser.fftSize = 2048;
            audioBufferSourceNode.connect(analyser);
            analyser.connect(audioContext.destination);
            audioBufferSourceNode.buffer = buffer; // the decoded buffer passed to the callback
            audioBufferSourceNode.start(0);        // some older browsers use noteOn() instead, same usage
            // Resume the AudioContext after a user gesture (browser autoplay policy).
            document.documentElement.addEventListener('mouseenter', () => {
                if (audioBufferSourceNode.context.state !== 'running')
                    audioBufferSourceNode.context.resume();
            });
            // High-pass filter settings (this filter node is configured but never
            // connected into the source -> analyser -> destination chain).
            filter.type = 'highpass';
            filter.frequency.value = 600;
            filter.Q.value = 800;
            var bufferLength = analyser.frequencyBinCount;
            var dataArray = new Uint8Array(bufferLength);
            canvasCtx.clearRect(0, 0, 300, 300);

            function draw() {
                drawVisual = requestAnimationFrame(draw); // drawVisual is an implicit global holding the latest frame handle
                analyser.getByteTimeDomainData(dataArray);
                canvasCtx.fillStyle = '#0e1a3b';
                canvasCtx.fillRect(0, 0, 300, 400);
                canvasCtx.lineWidth = 2;
                canvasCtx.strokeStyle = '#ffffff';
                canvasCtx.beginPath();
                var sliceWidth = 300 * 1.0 / bufferLength;
                var x = 0;
                for (var i = 0; i < bufferLength; i++) {
                    var v = dataArray[i] / 128.0;
                    var y = v * 200 / 3; // controls the vertical position of the waveform
                    if (i === 0) {
                        canvasCtx.moveTo(x, y);
                    } else {
                        canvasCtx.lineTo(x, y);
                    }
                    x += sliceWidth;
                }
                canvasCtx.lineTo(canvas.width, canvas.height / 2);
                canvasCtx.stroke();
            }
            draw();
        }, function (err) {
            console.log("!Fail to decode the file!" + url, err);
        });
    };
    request.send();
}

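// drawAudio() configures the BiquadFilterNode (high-pass, 600 Hz) but never wires
// it into the audio graph, so it has no audible or visual effect. If the intent
// was to filter the signal before analysis and playback, the chain would look
// roughly like the sketch below (an assumption, not confirmed by the original;
// connectWithFilter is a hypothetical helper):
function connectWithFilter(audioContext, source, filter, analyser) {
    source.connect(filter);                     // buffer source feeds the filter
    filter.connect(analyser);                   // the filtered signal is what gets analysed
    analyser.connect(audioContext.destination); // and finally reaches the output (speakers)
}
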
function playCoalAudio1() {
    var url = "/tcc/front/coalAudio";
    var canvas = document.getElementById("coalA1_audio_canvas");
    var canvasCtx = canvas.getContext("2d");
    // First instantiate an AudioContext. Browser support differs, so a compatibility
    // fallback is used. The AudioContext is the audio-processing interface: the nodes
    // (AudioNode) it creates are connected to each other, and the audio data flows
    // through those nodes and is processed along the way.
    // In short, an AudioContext is an audio object, just like new Date() is a date object.
    var AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext;
    if (!AudioContext) {
        alert("Your browser does not support the Web Audio API. Please try another browser (Chrome or Firefox); Chrome is strongly recommended!");
        return; // without an AudioContext there is nothing to play or draw
    }
    var audioContext = new AudioContext(); // instantiate
    var filter = audioContext.createBiquadFilter();

    // The steps that follow:
    // 1. Fetch the audio file (currently only a single file is supported)
    // 2. Read the audio file, which yields the audio data as a binary ArrayBuffer
    // 3. Decode the binary data

    function getData() {
        var request = new XMLHttpRequest();   // open a request
        request.open('POST', url, true);      // request data from the url
        request.responseType = 'arraybuffer'; // set the response type
        request.onload = function () {
            var audioData = request.response;
            // Decode once the data has finished buffering
            audioContext.decodeAudioData(audioData, function (buffer) {
                //source.buffer = buffer; // put the decoded data into the source
                // Create an AudioBufferSourceNode, the node that plays the decoded buffer
                var audioBufferSourceNode = audioContext.createBufferSource();
                // Create an AnalyserNode, the node used to analyse the audio spectrum
                var analyser = audioContext.createAnalyser();
                // fftSize (Fast Fourier Transform size) is usually 2048. It sets
                // analyser.frequencyBinCount (= fftSize / 2), i.e. how many data points
                // the analyser returns, which controls how dense or sparse the plot looks.
                // analyser.fftSize = 256;
                analyser.fftSize = 2048;
                // Connect the nodes. audioContext.destination is the final output target
                // (think of it as the sound card), so the last node in the chain must be
                // connected to audioContext.destination for anything to be heard.
                audioBufferSourceNode.connect(analyser);
                analyser.connect(audioContext.destination);
                //console.log(audioContext.destination)
                // Play the audio
                audioBufferSourceNode.buffer = buffer; // the decoded buffer passed to the callback
                audioBufferSourceNode.start(0);        // some older browsers use noteOn() instead, same usage

                // Resume the AudioContext after a user gesture (browser autoplay policy).
                document.documentElement.addEventListener('mouseenter', () => {
                    if (audioBufferSourceNode.context.state !== 'running')
                        audioBufferSourceNode.context.resume();
                });

                // High-pass filter settings (this filter node is configured but never
                // connected into the source -> analyser -> destination chain).
                filter.type = 'highpass';
                filter.frequency.value = 600;
                filter.Q.value = 800;

                // Visualisation: create the data array
                var bufferLength = analyser.frequencyBinCount;
                var dataArray = new Uint8Array(bufferLength);
                canvasCtx.clearRect(0, 0, 300, 300);

                function draw() {
                    drawVisual = requestAnimationFrame(draw); // drawVisual is an implicit global holding the latest frame handle
                    analyser.getByteTimeDomainData(dataArray);
                    canvasCtx.fillStyle = '#0e1a3b';
                    canvasCtx.fillRect(0, 0, 300, 400);
                    canvasCtx.lineWidth = 2;
                    canvasCtx.strokeStyle = '#ffffff';

                    canvasCtx.beginPath();
                    var sliceWidth = 300 * 1.0 / bufferLength;
                    var x = 0;
                    for (var i = 0; i < bufferLength; i++) {
                        var v = dataArray[i] / 128.0;
                        var y = v * 200 / 3; // controls the vertical position of the waveform
                        if (i === 0) {
                            canvasCtx.moveTo(x, y);
                        } else {
                            canvasCtx.lineTo(x, y);
                        }
                        x += sliceWidth;
                    }
                    canvasCtx.lineTo(canvas.width, canvas.height / 2);
                    canvasCtx.stroke();
                }

                draw();
            }, function (err) {
                // alert('!Fail to decode the file!'); // decode-error handling
                console.log("!Fail to decode the file!", err);
            });
        };
        request.send();
    }

    if (pickup_isBreak == 0 && audio_type == 0) {
        getData();
    }

    setInterval(function () {
        if (pickup_isBreak == 0 && audio_type == 0) {
            getData();
        }
    }, 1000);
}

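// The comments above describe the AnalyserNode as a spectrum analyser, but both
// draw() loops actually plot the time-domain waveform via getByteTimeDomainData().
// Below is a minimal sketch of a bar-style frequency spectrum drawn from the same
// analyser; drawSpectrum is a hypothetical helper, not part of the original page:
function drawSpectrum(analyser, canvasCtx, width, height) {
    var bufferLength = analyser.frequencyBinCount;     // fftSize / 2 bins
    var dataArray = new Uint8Array(bufferLength);
    analyser.getByteFrequencyData(dataArray);          // per-bin magnitudes, 0-255
    canvasCtx.fillStyle = '#0e1a3b';
    canvasCtx.fillRect(0, 0, width, height);           // same dark background as above
    canvasCtx.fillStyle = '#ffffff';
    var barWidth = width / bufferLength;
    for (var i = 0; i < bufferLength; i++) {
        var barHeight = (dataArray[i] / 255) * height; // scale magnitude to canvas height
        canvasCtx.fillRect(i * barWidth, height - barHeight, barWidth, barHeight);
    }
}
// Usage (sketch): call drawSpectrum(analyser, canvasCtx, canvas.width, canvas.height)
// inside a requestAnimationFrame loop instead of the waveform drawing above.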