/**
 * transmuxer.js
 *
 * Partial/full transmuxing pipelines for MPEG-2 Transport Stream and raw
 * AAC (ADTS) input.
 */
  1. var Stream = require('../utils/stream.js');
  2. var m2ts = require('../m2ts/m2ts.js');
  3. var codecs = require('../codecs/index.js');
  4. var AudioSegmentStream = require('./audio-segment-stream.js');
  5. var VideoSegmentStream = require('./video-segment-stream.js');
  6. var trackInfo = require('../mp4/track-decode-info.js');
  7. var isLikelyAacData = require('../aac/utils').isLikelyAacData;
  8. var AdtsStream = require('../codecs/adts');
  9. var AacStream = require('../aac/index');
  10. var clock = require('../utils/clock');
  11. var createPipeline = function(object) {
  12. object.prototype = new Stream();
  13. object.prototype.init.call(object);
  14. return object;
  15. };
/**
 * Build the transmuxing pipeline for MPEG-2 Transport Stream input.
 *
 * Wires TS packet parsing -> elementary stream demuxing -> timestamp
 * rollover handling, then fans out to H.264, ADTS audio, captions, and
 * timed metadata. Audio/video segment streams are created lazily once the
 * PMT (a 'metadata' event from the elementary stream) announces the tracks.
 *
 * @param {Object} options - transmuxer options; `baseMediaDecodeTime` and
 *                           `keepOriginalTimestamps` are read here
 * @return {Object} a Stream-augmented pipeline object whose
 *                  `headOfPipeline` accepts raw TS bytes
 */
var tsPipeline = function(options) {
  var
  pipeline = {
    type: 'ts',
    tracks: {
      audio: null,
      video: null
    },
    packet: new m2ts.TransportPacketStream(),
    parse: new m2ts.TransportParseStream(),
    elementary: new m2ts.ElementaryStream(),
    timestampRollover: new m2ts.TimestampRolloverStream(),
    adts: new codecs.Adts(),
    h264: new codecs.h264.H264Stream(),
    captionStream: new m2ts.CaptionStream(options),
    metadataStream: new m2ts.MetadataStream()
  };

  pipeline.headOfPipeline = pipeline.packet;

  // Transport Stream: 188-byte packets -> parsed tables/PES -> elementary
  // streams -> PTS/DTS rollover correction
  pipeline.packet
    .pipe(pipeline.parse)
    .pipe(pipeline.elementary)
    .pipe(pipeline.timestampRollover);

  // H264
  pipeline.timestampRollover
    .pipe(pipeline.h264);

  // Hook up CEA-608/708 caption stream
  pipeline.h264
    .pipe(pipeline.captionStream);

  // ID3 timed metadata also comes off the rollover-corrected stream
  pipeline.timestampRollover
    .pipe(pipeline.metadataStream);

  // ADTS
  pipeline.timestampRollover
    .pipe(pipeline.adts);

  // PMT arrives as a 'metadata' event; use it to discover tracks and to
  // lazily create the segment streams exactly once per track type.
  pipeline.elementary.on('data', function(data) {
    if (data.type !== 'metadata') {
      return;
    }

    // Record each newly-seen track and seed its timeline start with the
    // configured base media decode time.
    for (var i = 0; i < data.tracks.length; i++) {
      if (!pipeline.tracks[data.tracks[i].type]) {
        pipeline.tracks[data.tracks[i].type] = data.tracks[i];
        pipeline.tracks[data.tracks[i].type].timelineStartInfo.baseMediaDecodeTime = options.baseMediaDecodeTime;
      }
    }

    if (pipeline.tracks.video && !pipeline.videoSegmentStream) {
      pipeline.videoSegmentStream = new VideoSegmentStream(pipeline.tracks.video, options);

      pipeline.videoSegmentStream.on('timelineStartInfo', function(timelineStartInfo) {
        // Keep audio aligned with video: trim audio that precedes the
        // first video frame unless original timestamps are preserved.
        if (pipeline.tracks.audio && !options.keepOriginalTimestamps) {
          pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - options.baseMediaDecodeTime);
        }
      });

      pipeline.videoSegmentStream.on('timingInfo',
                                     pipeline.trigger.bind(pipeline, 'videoTimingInfo'));

      pipeline.videoSegmentStream.on('data', function(data) {
        pipeline.trigger('data', {
          type: 'video',
          data: data
        });
      });

      pipeline.videoSegmentStream.on('done',
                                     pipeline.trigger.bind(pipeline, 'done'));
      pipeline.videoSegmentStream.on('partialdone',
                                     pipeline.trigger.bind(pipeline, 'partialdone'));
      pipeline.videoSegmentStream.on('endedtimeline',
                                     pipeline.trigger.bind(pipeline, 'endedtimeline'));

      pipeline.h264
        .pipe(pipeline.videoSegmentStream);
    }

    if (pipeline.tracks.audio && !pipeline.audioSegmentStream) {
      pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);

      pipeline.audioSegmentStream.on('data', function(data) {
        pipeline.trigger('data', {
          type: 'audio',
          data: data
        });
      });

      pipeline.audioSegmentStream.on('done',
                                     pipeline.trigger.bind(pipeline, 'done'));
      pipeline.audioSegmentStream.on('partialdone',
                                     pipeline.trigger.bind(pipeline, 'partialdone'));
      pipeline.audioSegmentStream.on('endedtimeline',
                                     pipeline.trigger.bind(pipeline, 'endedtimeline'));

      pipeline.audioSegmentStream.on('timingInfo',
                                     pipeline.trigger.bind(pipeline, 'audioTimingInfo'));

      pipeline.adts
        .pipe(pipeline.audioSegmentStream);
    }

    // emit pmt info
    // NOTE(review): this fires on every PMT, not just the first — confirm
    // downstream consumers tolerate repeated 'trackinfo' events.
    pipeline.trigger('trackinfo', {
      hasAudio: !!pipeline.tracks.audio,
      hasVideo: !!pipeline.tracks.video
    });
  });

  pipeline.captionStream.on('data', function(caption) {
    var timelineStartPts;

    if (pipeline.tracks.video) {
      timelineStartPts = pipeline.tracks.video.timelineStartInfo.pts || 0;
    } else {
      // This will only happen if we encounter caption packets before
      // video data in a segment. This is an unusual/unlikely scenario,
      // so we assume the timeline starts at zero for now.
      timelineStartPts = 0;
    }

    // Translate caption PTS times into second offsets into the
    // video timeline for the segment
    caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, options.keepOriginalTimestamps);
    caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, options.keepOriginalTimestamps);

    pipeline.trigger('caption', caption);
  });

  // Augment with the Stream interface; createPipeline mutates and returns
  // the same object, so handlers above that close over `pipeline` still work.
  pipeline = createPipeline(pipeline);

  pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));

  return pipeline;
};
  129. var aacPipeline = function(options) {
  130. var
  131. pipeline = {
  132. type: 'aac',
  133. tracks: {
  134. audio: null
  135. },
  136. metadataStream: new m2ts.MetadataStream(),
  137. aacStream: new AacStream(),
  138. audioRollover: new m2ts.TimestampRolloverStream('audio'),
  139. timedMetadataRollover: new m2ts.TimestampRolloverStream('timed-metadata'),
  140. adtsStream: new AdtsStream(true)
  141. };
  142. // set up the parsing pipeline
  143. pipeline.headOfPipeline = pipeline.aacStream;
  144. pipeline.aacStream
  145. .pipe(pipeline.audioRollover)
  146. .pipe(pipeline.adtsStream);
  147. pipeline.aacStream
  148. .pipe(pipeline.timedMetadataRollover)
  149. .pipe(pipeline.metadataStream);
  150. pipeline.metadataStream.on('timestamp', function(frame) {
  151. pipeline.aacStream.setTimestamp(frame.timeStamp);
  152. });
  153. pipeline.aacStream.on('data', function(data) {
  154. if ((data.type !== 'timed-metadata' && data.type !== 'audio') || pipeline.audioSegmentStream) {
  155. return;
  156. }
  157. pipeline.tracks.audio = pipeline.tracks.audio || {
  158. timelineStartInfo: {
  159. baseMediaDecodeTime: options.baseMediaDecodeTime
  160. },
  161. codec: 'adts',
  162. type: 'audio'
  163. };
  164. // hook up the audio segment stream to the first track with aac data
  165. pipeline.audioSegmentStream = new AudioSegmentStream(pipeline.tracks.audio, options);
  166. pipeline.audioSegmentStream.on('data', function(data) {
  167. pipeline.trigger('data', {
  168. type: 'audio',
  169. data: data
  170. });
  171. });
  172. pipeline.audioSegmentStream.on('partialdone',
  173. pipeline.trigger.bind(pipeline, 'partialdone'));
  174. pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));
  175. pipeline.audioSegmentStream.on('endedtimeline',
  176. pipeline.trigger.bind(pipeline, 'endedtimeline'));
  177. pipeline.audioSegmentStream.on('timingInfo',
  178. pipeline.trigger.bind(pipeline, 'audioTimingInfo'));
  179. // Set up the final part of the audio pipeline
  180. pipeline.adtsStream
  181. .pipe(pipeline.audioSegmentStream);
  182. pipeline.trigger('trackinfo', {
  183. hasAudio: !!pipeline.tracks.audio,
  184. hasVideo: !!pipeline.tracks.video
  185. });
  186. });
  187. // set the pipeline up as a stream before binding to get access to the trigger function
  188. pipeline = createPipeline(pipeline);
  189. pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));
  190. return pipeline;
  191. };
  192. var setupPipelineListeners = function(pipeline, transmuxer) {
  193. pipeline.on('data', transmuxer.trigger.bind(transmuxer, 'data'));
  194. pipeline.on('done', transmuxer.trigger.bind(transmuxer, 'done'));
  195. pipeline.on('partialdone', transmuxer.trigger.bind(transmuxer, 'partialdone'));
  196. pipeline.on('endedtimeline', transmuxer.trigger.bind(transmuxer, 'endedtimeline'));
  197. pipeline.on('audioTimingInfo', transmuxer.trigger.bind(transmuxer, 'audioTimingInfo'));
  198. pipeline.on('videoTimingInfo', transmuxer.trigger.bind(transmuxer, 'videoTimingInfo'));
  199. pipeline.on('trackinfo', transmuxer.trigger.bind(transmuxer, 'trackinfo'));
  200. pipeline.on('id3Frame', function(event) {
  201. // add this to every single emitted segment even though it's only needed for the first
  202. event.dispatchType = pipeline.metadataStream.dispatchType;
  203. // keep original time, can be adjusted if needed at a higher level
  204. event.cueTime = clock.videoTsToSeconds(event.pts);
  205. transmuxer.trigger('id3Frame', event);
  206. });
  207. pipeline.on('caption', function(event) {
  208. transmuxer.trigger('caption', event);
  209. });
  210. };
  211. var Transmuxer = function(options) {
  212. var
  213. pipeline = null,
  214. hasFlushed = true;
  215. options = options || {};
  216. Transmuxer.prototype.init.call(this);
  217. options.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
  218. this.push = function(bytes) {
  219. if (hasFlushed) {
  220. var isAac = isLikelyAacData(bytes);
  221. if (isAac && (!pipeline || pipeline.type !== 'aac')) {
  222. pipeline = aacPipeline(options);
  223. setupPipelineListeners(pipeline, this);
  224. } else if (!isAac && (!pipeline || pipeline.type !== 'ts')) {
  225. pipeline = tsPipeline(options);
  226. setupPipelineListeners(pipeline, this);
  227. }
  228. hasFlushed = false;
  229. }
  230. pipeline.headOfPipeline.push(bytes);
  231. };
  232. this.flush = function() {
  233. if (!pipeline) {
  234. return;
  235. }
  236. hasFlushed = true;
  237. pipeline.headOfPipeline.flush();
  238. };
  239. this.partialFlush = function() {
  240. if (!pipeline) {
  241. return;
  242. }
  243. pipeline.headOfPipeline.partialFlush();
  244. };
  245. this.endTimeline = function() {
  246. if (!pipeline) {
  247. return;
  248. }
  249. pipeline.headOfPipeline.endTimeline();
  250. };
  251. this.reset = function() {
  252. if (!pipeline) {
  253. return;
  254. }
  255. pipeline.headOfPipeline.reset();
  256. };
  257. this.setBaseMediaDecodeTime = function(baseMediaDecodeTime) {
  258. if (!options.keepOriginalTimestamps) {
  259. options.baseMediaDecodeTime = baseMediaDecodeTime;
  260. }
  261. if (!pipeline) {
  262. return;
  263. }
  264. if (pipeline.tracks.audio) {
  265. pipeline.tracks.audio.timelineStartInfo.dts = undefined;
  266. pipeline.tracks.audio.timelineStartInfo.pts = undefined;
  267. trackInfo.clearDtsInfo(pipeline.tracks.audio);
  268. if (pipeline.audioRollover) {
  269. pipeline.audioRollover.discontinuity();
  270. }
  271. }
  272. if (pipeline.tracks.video) {
  273. if (pipeline.videoSegmentStream) {
  274. pipeline.videoSegmentStream.gopCache_ = [];
  275. }
  276. pipeline.tracks.video.timelineStartInfo.dts = undefined;
  277. pipeline.tracks.video.timelineStartInfo.pts = undefined;
  278. trackInfo.clearDtsInfo(pipeline.tracks.video);
  279. // pipeline.captionStream.reset();
  280. }
  281. if (pipeline.timestampRollover) {
  282. pipeline.timestampRollover.discontinuity();
  283. }
  284. };
  285. this.setRemux = function(val) {
  286. options.remux = val;
  287. if (pipeline && pipeline.coalesceStream) {
  288. pipeline.coalesceStream.setRemux(val);
  289. }
  290. };
  291. this.setAudioAppendStart = function(audioAppendStart) {
  292. if (!pipeline || !pipeline.tracks.audio || !pipeline.audioSegmentStream) {
  293. return;
  294. }
  295. pipeline.audioSegmentStream.setAudioAppendStart(audioAppendStart);
  296. };
  297. // TODO GOP alignment support
  298. // Support may be a bit trickier than with full segment appends, as GOPs may be split
  299. // and processed in a more granular fashion
  300. this.alignGopsWith = function(gopsToAlignWith) {
  301. return;
  302. };
  303. };
// Inherit the Stream interface (on/off/trigger/pipe) so consumers can
// attach event listeners directly to Transmuxer instances.
Transmuxer.prototype = new Stream();

module.exports = Transmuxer;