audio-frame-utils.js

  1. "use strict";
  2. /**
  3. * mux.js
  4. *
  5. * Copyright (c) Brightcove
  6. * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE
  7. */
  8. var coneOfSilence = require('../data/silence');
  9. var clock = require('../utils/clock');
  10. /**
  11. * Sum the `byteLength` properties of the data in each AAC frame
  12. */
  13. var sumFrameByteLengths = function sumFrameByteLengths(array) {
  14. var i,
  15. currentObj,
  16. sum = 0; // sum the byteLength's all each nal unit in the frame
  17. for (i = 0; i < array.length; i++) {
  18. currentObj = array[i];
  19. sum += currentObj.data.byteLength;
  20. }
  21. return sum;
  22. }; // Possibly pad (prefix) the audio track with silence if appending this track
  23. // would lead to the introduction of a gap in the audio buffer
var prefixWithSilence = function prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {
  var baseMediaDecodeTimeTs,
      frameDuration = 0,
      audioGapDuration = 0,
      audioFillFrameCount = 0,
      audioFillDuration = 0,
      silentFrame,
      i,
      firstFrame;

  if (!frames.length) {
    return;
  }

  baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate);

  // determine frame clock duration based on sample rate, round up to avoid overfills
  frameDuration = Math.ceil(clock.ONE_SECOND_IN_TS / (track.samplerate / 1024));

  if (audioAppendStartTs && videoBaseMediaDecodeTime) {
    // insert the shortest possible amount (audio gap or audio to video gap)
    audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime);

    // number of full frames in the audio gap
    audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);
    audioFillDuration = audioFillFrameCount * frameDuration;
  }

  // don't attempt to fill gaps smaller than a single frame or larger
  // than a half second
  if (audioFillFrameCount < 1 || audioFillDuration > clock.ONE_SECOND_IN_TS / 2) {
    return;
  }

  silentFrame = coneOfSilence()[track.samplerate];

  if (!silentFrame) {
    // we don't have a silent frame pregenerated for the sample rate, so use a frame
    // from the content instead
    silentFrame = frames[0].data;
  }

  for (i = 0; i < audioFillFrameCount; i++) {
    firstFrame = frames[0];

    frames.splice(0, 0, {
      data: silentFrame,
      dts: firstFrame.dts - frameDuration,
      pts: firstFrame.pts - frameDuration
    });
  }

  track.baseMediaDecodeTime -= Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));

  return audioFillDuration;
};
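// A minimal usage sketch (not part of the module): roughly how a caller such as
// an audio segment stream might invoke prefixWithSilence before generating a
// segment. The track fields and timestamp variables below are illustrative
// assumptions, not values taken from the library.
//
//   var track = { samplerate: 44100, baseMediaDecodeTime: 132300 }; // hypothetical
//   var filled = prefixWithSilence(
//     track,
//     frames,                  // AAC frames already parsed for this segment
//     audioAppendStartTs,      // end of the buffered audio, in 90kHz video ticks
//     videoBaseMediaDecodeTime // start of the accompanying video, in 90kHz ticks
//   );
//   // `filled` is the inserted silence duration in video ticks, or undefined
//   // when the gap was smaller than one frame or larger than half a second.
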
// If the audio segment extends before the earliest allowed dts
// value, remove AAC frames until it starts at or after the earliest
// allowed DTS so that we don't end up with a negative baseMediaDecodeTime
// for the audio track
var trimAdtsFramesByEarliestDts = function trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts) {
  if (track.minSegmentDts >= earliestAllowedDts) {
    return adtsFrames;
  }

  // We will need to recalculate the earliest segment DTS
  track.minSegmentDts = Infinity;

  return adtsFrames.filter(function (currentFrame) {
    // If this is an allowed frame, keep it and record its DTS
    if (currentFrame.dts >= earliestAllowedDts) {
      track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);
      track.minSegmentPts = track.minSegmentDts;
      return true;
    }

    // Otherwise, discard it
    return false;
  });
};
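// A small illustrative sketch (assumed values, not taken from the library):
// dropping frames that start before an earliest allowed DTS. Frame DTS values,
// earliestAllowedDts and track.minSegmentDts are expected to share a timescale.
//
//   var track = { minSegmentDts: -2048, minSegmentPts: -2048 }; // hypothetical
//   var frames = [
//     { dts: -2048, pts: -2048, data: new Uint8Array(8) }, // dropped
//     { dts: 0, pts: 0, data: new Uint8Array(8) }          // kept
//   ];
//   frames = trimAdtsFramesByEarliestDts(frames, track, 0);
//   // frames.length === 1; track.minSegmentDts === 0
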
// generate the track's sample table from an array of frames
var generateSampleTable = function generateSampleTable(frames) {
  var i,
      currentFrame,
      samples = [];

  for (i = 0; i < frames.length; i++) {
    currentFrame = frames[i];
    samples.push({
      size: currentFrame.data.byteLength,
      duration: 1024 // For AAC audio, each frame contains 1024 samples
    });
  }

  return samples;
};
// concatenate the data from an array of frames into the track's raw mdat payload
var concatenateFrameData = function concatenateFrameData(frames) {
  var i,
      currentFrame,
      dataOffset = 0,
      data = new Uint8Array(sumFrameByteLengths(frames));

  for (i = 0; i < frames.length; i++) {
    currentFrame = frames[i];
    data.set(currentFrame.data, dataOffset);
    dataOffset += currentFrame.data.byteLength;
  }

  return data;
};
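// Sketch of how the two helpers above pair up when building a fragment (the
// variable names and the downstream boxing step are assumptions for illustration;
// the actual wiring lives in the mux.js transmuxer):
//
//   var samples = generateSampleTable(frames);  // e.g. [{ size: 371, duration: 1024 }, ...]
//   var payload = concatenateFrameData(frames); // single Uint8Array with all frame bytes
//   // `samples` describes the per-frame entries for the track run, while
//   // `payload` becomes the body of the matching mdat box.
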
module.exports = {
  prefixWithSilence: prefixWithSilence,
  trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,
  generateSampleTable: generateSampleTable,
  concatenateFrameData: concatenateFrameData
};
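// Example consumer (the require path, variable names and call ordering are
// assumptions about how a transmuxer would typically use these helpers, not an
// excerpt from mux.js):
//
//   var frameUtils = require('./audio-frame-utils');
//
//   adtsFrames = frameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);
//   frameUtils.prefixWithSilence(track, adtsFrames, audioAppendStartTs, videoBaseMediaDecodeTime);
//   var samples = frameUtils.generateSampleTable(adtsFrames);
//   var mdatPayload = frameUtils.concatenateFrameData(adtsFrames);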